diff --git "a/pytest-dev__pytest-5221/docstore.json" "b/pytest-dev__pytest-5221/docstore.json" new file mode 100644--- /dev/null +++ "b/pytest-dev__pytest-5221/docstore.json" @@ -0,0 +1 @@ +{"docstore/metadata": {"/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench.py__": {"doc_hash": "d114fdceeebcf8d19e903babc9094a7f6d9e9f6a6e08aa1a59af3fee28365f45"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench_argcomplete.py__10000_iterations_just__": {"doc_hash": "43681c03a41220198355ebefe53fedd15dc65a8cb8ae9c6359b439e89c2dd6a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/empty.py__": {"doc_hash": "08407b07833a948b1cac17b89af076d6b692dd789468eafc0b28827a18552fab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/manyparam.py__": {"doc_hash": "c75da13215e59b279495be852109d053884f11cf8e0a86a54df5cc34e653bfaa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/skip.py__": {"doc_hash": "d929a0d14f7829f0ae937a15e6bdfecf2cc10d75a5af51a51d7758ecdaa712ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/_themes/flask_theme_support.py__flasky_extensions_fla_": {"doc_hash": "f3e4175c7f1688e29ed0c430c104f54f4fe5fe7248634d4c4ba34baa4941fdba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py__coding_utf_8__add_module_names.False": {"doc_hash": "cdecd9d675e4f6a1151fa10a5869c846676073cddf888e884b94a7a56758233e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_pygments_style__the_title_page_": {"doc_hash": "a1f2054fd5afa0f3043cd37a731b95e02a7e6d71991914e66333df8fc6181ef8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_latex_logo_": {"doc_hash": "448a3cb1df3a12678a86406b7cef1235c57cd86b280948d324898a65472aa934"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conftest.py__": {"doc_hash": "78f878a023ce5824b4e5caf19869dfe59b2701f86bfc976df31bf9d700c0e602"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestSpecialisedExplanations_TestSpecialisedExplanations.test_eq_attrs.assert_left_right": {"doc_hash": "a2a3d5e91ea750139fa626066168142772b99f6c4c2abe9314e942034e9c7edc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_test_attribute_globf.return.x_1": {"doc_hash": "38728dccea7270e516c6211e3dd919e216a93470ac7ffbd158e1f3695b4b3aac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestRaises_test_dynamic_compile_shows_nicely.module_foo_": {"doc_hash": "7f3f3a19673399c25a5ab39feab90b504ad4142e3bdab4ee39c0b80b713bbfca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestMoreErrors_TestMoreErrors.test_try_finally.try_.finally_.x.0": {"doc_hash": "edfedc27a54327e515bdefea0e60846e0c376bf729b68452c82f57d166630eee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestCustomAssertMsg_": {"doc_hash": "9d6bf0e759b73bd03cc9bf5853bc73de0cdb0666b1fb1dbe2436b67fad24c7b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/conftest.py__": {"doc_hash": "f391ff8ecdb84a93e9bc5f937128d1a2d3143a9f88e5c9a924f97811d1321898"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/test_hello_world.py__": {"doc_hash": "5f711a050ec0846d52b9a932639a334a204e0263b81f6d7d248ceb091dc695d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_failures.py__": {"doc_hash": "03f657972df80701a792c059abffdf66a8d2cfa6f8f1c5a5cc0569c60449fef9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_setup_flow_example.py_setup_module_": 
{"doc_hash": "b70bc3a99ebdd784f085b627cc28665a36df6f427e4eb696b007f1c9c6ff5ad0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/conftest.py__": {"doc_hash": "07d8046da363766ce73d6453fd04ebfbc318e2cd895cf13db5e3f52c58a802b1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/conftest.py__": {"doc_hash": "a4d3a58f02e53587c70bc9fa1aa046565063bff6667fcfccb734b0a41e5c6159"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/__init__.py__": {"doc_hash": "b575e959f178ecbf79a2450fa794fc752153ee9aad67d2e11464b3cb9f328930"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/test_quick.py__": {"doc_hash": "263704620d563db4d2f95f9a18d4cecd52e6eec92c28aa48816facbbce2d6125"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/__init__.py__": {"doc_hash": "f3c622b0f6f8b3ec9a97413263eba708296c4986ae7e5a32cb75e8850006b875"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/test_two.py__": {"doc_hash": "e905521bc0a8a43e61efdf023d027093a1540ed4568b368bdbbddf3b2fd4676f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py___python2.return.Python_request_param_pyt": {"doc_hash": "8bb2be2862a7bd95b78c4e1b2cf366cc7967d3091763733ee8d4760e2fb663ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python_Python.dumps.subprocess_check_call_se": {"doc_hash": "5c15d20f09166811e458caad7f01b1cb4d274c9e22388fb533e37f8e842e1f93"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python.load_and_is_true_": {"doc_hash": "2f3a63ee253c1a1d8679ee8543d14326a20af81123a0f568075db5feb6d228ac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/nonpython/conftest.py__content_of_conftest_py_": {"doc_hash": "a6f5db770f9487826114442bfd6acfc6236d38e06bc898abb5050328516e307e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/conftest.py__": {"doc_hash": "e97f8b6e0d35a18051bc053012574f9768bd7a7e723e49e93751c860891e5d72"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py2.py__": {"doc_hash": "ff06e7301299c485bab3e3e8b164d5b94cead6fba299cd9912e832fe9f44959a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py3.py__": {"doc_hash": "c6113ef36c1b66c4665022ffa820d590db8d42fa11f5c3087277e754178ce5aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/pythoncollection.py__": {"doc_hash": "468ebc07b00af2ab2076cf25b8e2f269819c3b89dab4e83c47e0ce1e38f92585"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/xfail_demo.py_pytest_": {"doc_hash": "3ac391098869f92add628935843f4e28c0ea2b8da083cbaf382540a93834366d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_json_get_issues.while_1_.if_not_another_page_.return.issues": {"doc_hash": "e156e1e4d2188400b60c469922efa26c0f39b295049d02e33c6fa897effe0393"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_main__get_kind.return._issue_": {"doc_hash": "56da9d787c8e25a51b7917ffbbb55873d9db3f8edfa1c5de20086d7d6411d1bf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_report_": {"doc_hash": "df5d9d3b97113de65d2e98e1e2c1d3ecfbda2bb9178bbbeba813f408e7f30db2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/setup-py.test/setup.py__": {"doc_hash": "750c0e34042d6b845fdfd9f7247bf184d101f2299d5624074229d3ae0786b0ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py___announce.check_call_git_add_": {"doc_hash": "a39db2409d007de57602d707c97a887ed9f180cb74e0e76cbeab6d9557aed507"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py_regen_": {"doc_hash": "27e5547b1fbc2493eeefc20ee1a174131c608027090388b8e4c7ac779845e9a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_from_setuptools_import_se_INSTALL_REQUIRES._": {"doc_hash": "0d4424b87dfb0e9e49ad921f3104d11d48709812aaed86c6883f7a78357f9c40"}, "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_main_": {"doc_hash": "03fb7b4537229cb5031e70a97570be97698f03d63a56f97f5203dc753c0c1077"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/__init__.py__": {"doc_hash": "bf6203ffa029a7fad23b4ef334bf1750eae101c8157d4767b6da2daf40153e5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py__allow_bash_completion__from_glob_import_glob": {"doc_hash": "f7defbbb0ff6925c2d4b1cafe8fee58e96f837a22e12e4d48acc7758d3447060"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py_FastFilesCompleter_": {"doc_hash": "d12e44d40d6ee9fe4226fb6137dfdf5caa3df2a839a0fc38ef1b6db5c6ee96c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/__init__.py__python_inspection_cod_": {"doc_hash": "d9828b95efbc8d62d651b62fb3c48d351dca38ff6676c04a757c1e13b4bc1c05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__copied_from_python_2_7__format_exception_only.return.lines": {"doc_hash": "9068639ad6efe32adf4eda9b5b873d39b5c0459158214c885773dbf4dfa7ec40"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__format_final_exc_line_": {"doc_hash": "ca5cf46df410394041e78fd609c58c442393bd7b3abda96b1167def67204c081"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_from___future___import_ab_if__PY3_.else_.format_exception_only": {"doc_hash": "435fce58c710eea17874e9b1f9ecd785226dc6ded793612f2603314b6ac4d186"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Code_Code.getargs.return.raw_co_varnames_argcount": {"doc_hash": "120ff74672a93761774b3f1c4ffa3ae7a0c0f3c057043c95df492efa2b6f25d4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Frame_Frame.getargs.return.retval": {"doc_hash": "a9b58fd261d4a5473fdbafff2610bec9faadc0510619085831c4ad20f6acc219"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry_TracebackEntry.getfirstlinesource.return.max_self_frame_code_first": {"doc_hash": "ab7434373d22b15aff7ac167c7b5a3fe679a1a9c1e1e3f6f1818de76b777b13a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.getsource_TracebackEntry.getsource.return.source_start_end_": {"doc_hash": "86b25bdb8cdde771da020a22b139bf383e648b3015821cce6f26093cd92e4d16"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.source_TracebackEntry.ishidden.return.tbh": {"doc_hash": "a24fd70ad2d65e9c957dfaa2f9c219fa91b7cb08c7da6b1ae9ecdcdd4d3b8dfc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.__str___TracebackEntry.name.property_name_None_None": {"doc_hash": "069ba334575162cac3ff59b64de36387d896f6588a4ea35d535f740770827c50"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback_Traceback.__init__.if_hasattr_tb_tb_next_.else_.list___init___self_tb_": {"doc_hash": "c132a2b7bd1f75062cbe803803a299e719f9f297fac9a0545941f6fe1eb099aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.cut_Traceback.cut.return.self": {"doc_hash": "6adf629e5fde33b7ffcaf558a2b66f8ae9ac517725fea86edb8071679ac0378c"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.__getitem___Traceback.getcrashentry.return.self_1_": {"doc_hash": "a09517081a7c9f494bcbf9af9bda7d4ee95003b082bc8dac567a2ea93aab566c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.recursionindex_co_equal.compile_": {"doc_hash": "cfbe75d0563f600c01c473b58c90090fd9649b9136c2edf8e4586a1cb3280f5b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo_ExceptionInfo.from_current.return.cls_tup__striptext_": {"doc_hash": "cf4989265430b244ec41a96261b14605b432469c20513efd0818861bfb0938fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.for_later_ExceptionInfo.__repr__.return._ExceptionInfo_s_tblen_": {"doc_hash": "61eea78d96e9a2d77c875d45ea211801228384c51bdd3f6bfd0b183ecf76a9ce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.exconly_ExceptionInfo._getreprcrash.return.ReprFileLocation_path_li": {"doc_hash": "182d857ffc8de72a6115e61f109122239d26714e2742dace9eaea180311cd7fe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.getrepr_ExceptionInfo.getrepr.return.fmt_repr_excinfo_self_": {"doc_hash": "9b6c5474d47eb28a1871a3cbf475f79f8d4957d123d83d834efe6fa523582887"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.__str___ExceptionInfo.match.return.True": {"doc_hash": "e51728b534fbd8fc458e5e6a1db53e871433953d64f2889d194de7017fdb23f7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo_FormattedExcinfo.repr_args.if_self_funcargs_.return.ReprFuncArgs_args_": {"doc_hash": "61b96de4b281a8a0c3d494c49c697cd7fbf050de625dc630ead1022e803688d7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.get_source_FormattedExcinfo.get_exconly.return.lines": {"doc_hash": "a34dcf2a57afca58f5f4a870114249d04d444bf2338aa7c21d3fc970dc6c53f1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_locals_FormattedExcinfo.repr_locals.if_self_showlocals_.return.ReprLocals_lines_": {"doc_hash": "17c4a5d605a09d129844ae8f3b4ab20a15d059e4666dfb703c5181e3416c9330"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_traceback_entry_FormattedExcinfo.repr_traceback_entry.return.ReprEntry_lines_None_No": {"doc_hash": "763312d4bc5b9e9abdf0cbcc3d154c6baa65dfa711b1a5ab4bf1d17d755d5797"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._makepath_FormattedExcinfo.repr_traceback.return.ReprTraceback_entries_ex": {"doc_hash": "f91938d82ddcea1bf1ea0a085d203dbb8c137d28f8d5a08df3bb663bfb475755"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._truncate_recursive_traceback_FormattedExcinfo._truncate_recursive_traceback.return.traceback_extraline": {"doc_hash": "387f37c2b21ff7e5a0c4ab4b5e84ad3530b747d3b1df95c776b952c8222ddb95"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_excinfo_FormattedExcinfo.repr_excinfo.if__PY2_.else_.return.ExceptionChainRepr_repr_c": {"doc_hash": "c8ffd36301b62309b94f7b720d79339bd437a4552b69d7da96aa711e44b54dad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TerminalRepr_ExceptionRepr.toterminal.for_name_content_sep_in.tw_line_content_": {"doc_hash": "bd32bef3d07a949adaf773a1bcd99b56959ba535e58845dc4735734c7c627720"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionChainRepr_ReprExceptionInfo.toterminal.super_ReprExceptionInfo_": {"doc_hash": "187c6a6391ffe1fd489cad38d245ed65b21051ae80eec5fe4cc872f35beaa4aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprTraceback_ReprEntryNative.toterminal.tw_write_join_self_lin": {"doc_hash": "04fd55dca4abb2ba3715c314ba90622ab7d6954c7d1f4bf90683c120ef7cdfc4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprEntry_ReprEntry.__str__.return._s_n_s_n_s_n_join": {"doc_hash": "8ec550f52cfcf9518094bb01141630aae070c692f01d961e8d87b92ec7147cce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFileLocation_ReprLocals.toterminal.for_line_in_self_lines_.tw_line_line_": {"doc_hash": "f90388b5fe7ae4474e5a5f9f8bda2ff1a40833c5c527ebf9235f11d7d0f0f37b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFuncArgs_ReprFuncArgs.toterminal.if_self_args_.tw_line_": {"doc_hash": "90446d1646a4a52b24f0f035674c95625a6e049c60efb66959110e5d81c77cb2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_getrawcode_getrawcode.try_.except_AttributeError_.return.obj": {"doc_hash": "a45fcef937c4160595577d9efc57001861ecc3c49435814ad4bd2b2b4eb502e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_if_PY35_RecursionErro__PY_DIR.py_path_local_py___file__": {"doc_hash": "7e56a06d0b59381d81a03bd521fd11fa905a533484bccf8d55aa835c174256ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_filter_traceback_": {"doc_hash": "cd1871a09e6309349370d57a583c2495242261d3011743216c5f8051d666e60c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_from___future___import_ab_Source.__str__.return._n_join_self_lines_": {"doc_hash": "1c56a393bc9c4f3239768884ea6e8b5c36bc667b3937e7ddcad8a29a11675909"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_Source.compile_Source.compile.try_.else_.return.co": {"doc_hash": "ad6e54299af6a1b10152dcb7cf0ca1acbc5e0ab3d31456398972d84d2e4addd8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py___compile_.return.co": {"doc_hash": "e853ee5d6801b6738a9e0659106941ba719fc2546ec4c97261c545e50dc87c41"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getfslineno_getfslineno.return.fspath_lineno": {"doc_hash": "45c6e46c1fbd96f59fe0a8b2665a090dedeed3765513ee4f3093705c3ba64f1d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_None_3_deindent.return.textwrap_dedent_n_join": {"doc_hash": "2ec2e41ebd2749ee66f18166dbefdc7d9fbaecb6234b4206e961c3713801d86d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_get_statement_startend2_get_statement_startend2.return.start_end": {"doc_hash": "571954914d5383b03411f57579f43a87d76b7280ca35416c9e6a21470dd35e73"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getstatementrange_ast_": {"doc_hash": "1045c173d867324055a2740d123402488f5f385435364490f886b1a8f5d4ebbb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_pprint__call_and_format_exception.try_.except_Exception_as_exc_.return._s_s_raised_in_rep": {"doc_hash": "4eaa7411aa54759e0b2cbec9e8445482ff5302b1823f88239c882367e0ca28c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr_SafeRepr.repr_unicode.return.s": {"doc_hash": "b45812c645ed052a40aa8921fb948d62d666aca97f8c446ac069901fc5ecfe28"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr.repr_instance_safeformat.return._call_and_format_exceptio": {"doc_hash": "e8e406a16575236bea473a1d5cdab6140ad1f4bc3e1a779cbfad1dee11e2a828"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_saferepr_": {"doc_hash": "d24777d0f90e4e44e6675a2ff898691e9ad31b55ab4458b83113aff5e4583281"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py___pytest_addoption.group_addoption_": {"doc_hash": "6f108d26d2bfd9597178e55eca3b093347938c0ae5a73046c9e0fe12f783257a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_register_assert_rewrite_register_assert_rewrite.importhook_mark_rewrite_": {"doc_hash": "4b02d6b9961fdc8c51ebc7b28ba27f6cba8682d42f92d7c387063936aeb7a82b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_DummyRewriteHook_pytest_collection.if_assertstate_.if_assertstate_hook_is_no.assertstate_hook_set_sess": {"doc_hash": "70f2e6785fc11c4c1fb675d4ce6d693b7c76971b95b4e9b5aa66260af393c798"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_pytest_runtest_setup_": {"doc_hash": "6a3bd8c54058e7076dba245f1567f28ed0ec6cbc925cdaf40ea170091f8f980f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__Rewrite_assertion_AST__if_sys_version_info_3.else_.ast_Call.return.ast_Call_a_b_c_None_N": {"doc_hash": "318a1bd9154b027685584adb84447ed88cf76a2f2d0528acdcf57eb22040029c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook_AssertionRewritingHook._imp_find_module.return.imp_find_module_name_pat": {"doc_hash": "edfe7a9a9780a52b3bb7e2e1b42b15c96d18236c3f575ac15b7e873363a510ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module_AssertionRewritingHook.find_module.co._read_pyc_fn_pypath_pyc_": {"doc_hash": "9f0cc0784b3a414a87ce2010496059cd9854afec9a7e68b925d95075238c025b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module.if_co_is_None__AssertionRewritingHook.find_module.return.self": {"doc_hash": "a1c972931a89a6f0aea6e8ceefc3b30e6fcea9eff8fd0871be00097d1b7ff247"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._early_rewrite_bailout_AssertionRewritingHook._early_rewrite_bailout.return.True": {"doc_hash": "5db053ecc676d74e1e5d2569e600c453c27c0fe682a13ae4177041e69a809d84"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._should_rewrite_AssertionRewritingHook._should_rewrite.return.self__is_marked_for_rewri": {"doc_hash": "7fd553fd6d98ceefc554f7d7ccd149be61e89a969b9cf52bb5fa56edbf251f53"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._is_marked_for_rewrite_AssertionRewritingHook._warn_already_imported._issue_warning_captured_": {"doc_hash": "83ecd1759307d5b7ea4705e849cbdbee10431eb2b90e71adc67bd73d0281a786"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.load_module_AssertionRewritingHook.load_module.return.sys_modules_name_": {"doc_hash": "18b74dfb6b33b7ca592a45242d732f639781304ad4f6054518054fabcafe93f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.is_package_AssertionRewritingHook.get_data.with_open_pathname_rb_.return.f_read_": {"doc_hash": 
"ec9f3f2ba511b17fcd64e10659ac56ef576e28f52ae9276308b0d6542db1ef51"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__write_pyc_BOM_UTF8._xef_xbb_xbf_": {"doc_hash": "eaa3564757b3907b183474d8b155a2d53dde2398d8a1ba86848a800d74dd5591"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__rewrite_test__rewrite_test.return.stat_co": {"doc_hash": "bab9799102cccdc3c23646dc41cba48e7f70c4911a0f6ac6f1acf0bc83a68ffe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__read_pyc__read_pyc.with_fp_.return.co": {"doc_hash": "52306970278392e5461ca0d51192ccd9949d20a45fa1e00c7e5545ad26869e96"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_rewrite_asserts__saferepr.return.r_replace_u_n_u_n_": {"doc_hash": "243f0cae206f336340df0c3deeebfe987775c17baab4e530c3c3c708197e8ce0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__format_assertmsg__format_assertmsg.return.obj": {"doc_hash": "2f39c7a4bdd17f8311d9e4d7a99f14619b357616f39b596d618bc7650482155f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__should_repr_global_name_set_location.return.node": {"doc_hash": "56570d27acb2c921c82fdd3a381522b170772cab19bbb4163e307bdcbf966a41"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter_AssertionRewriter.__init__.self.config.config": {"doc_hash": "45a17fc2b18560d7d18db5b593bff104c1ae3dfc14e32cec3ea1c5cae9bd478a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.run_AssertionRewriter.run.while_nodes_.for_name_field_in_ast_it.if_isinstance_field_list.elif_.nodes_append_field_": {"doc_hash": "bbe78fb2ebdb1725414c2a908df6fb911cf27fc7c5f3bfc8e3f6b7f7252c3207"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.is_rewrite_disabled_AssertionRewriter.builtin.return.ast_Attribute_builtin_nam": {"doc_hash": "5a25ad77f11b9ad7432fd2bf945994f4347bc4c8ed3a53c83f9176fd7fed5db5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.explanation_param_AssertionRewriter.explanation_param.return._specifier_s_": {"doc_hash": "dc1aee72d12a404bdbeaf3406d61c43723ef64159ecd00ae09c9b1eed0eeef8e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.push_format_context_AssertionRewriter.push_format_context.self_stack_append_self_ex": {"doc_hash": "c67f9cd6cf94ca1719af6aec9222c025b73b3be631be30bf2c9b8b3558f7c7f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.pop_format_context_AssertionRewriter.generic_visit.return.res_self_explanation_par": {"doc_hash": "73c6a3890674f37245269e124d8cd289c1f49e64b88e2a99108c9eb94a56fbd6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Assert_AssertionRewriter.visit_Assert.return.self_statements": {"doc_hash": "abb61d12e5221fd37145e0d6c14636360696e79fe0417413024ac3f31f65f801"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.warn_about_none_ast_AssertionRewriter.warn_about_none_ast.return.ast_If_val_is_none_send_": {"doc_hash": "4803b87ec730b4ea4465641d8bb2d11f52f655fcbb19b6e5244a1699414f58c2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Name_AssertionRewriter.visit_Name.return.name_self_explanation_pa": {"doc_hash": 
"67ca20b7c44ffc916e3d09db0e4035f5a0f317c758c41ae6df2686e35b6df7e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_BoolOp_AssertionRewriter.visit_BoolOp.return.ast_Name_res_var_ast_Loa": {"doc_hash": "189ade7f78970d1d7091d04f4b4ef20935010eaf44a3617453c55cbae1839a54"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_UnaryOp_AssertionRewriter.visit_BinOp.return.res_explanation": {"doc_hash": "c3888df8c04014a4fd834ace430ac6e7c24a7e023953dd22b9bac6cee67a0d90"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_35_AssertionRewriter.visit_Starred.return.new_starred_expl": {"doc_hash": "229f3c3e6d55d00043d0ac4d2fd586605e9aa92212dc2ad2d44637caa7e07fce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_legacy_AssertionRewriter.visit_Call_legacy.return.res_outer_expl": {"doc_hash": "a104ded28b353b75a3a9bd10ef64111009aa82c148806b8152f69ec2807da8d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter._ast_Call_signature_chan_AssertionRewriter.visit_Attribute.return.res_expl": {"doc_hash": "fedd3784ad1e30dd7dc7418c163cf85018aa2585947b5b4400b925b690c852b1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Compare_": {"doc_hash": "83ca51ef53ce60cf51d4f39b92f972ba2cd147a36322f8a26b708f7e01bad4ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py____running_on_ci.return.any_var_in_os_environ_for": {"doc_hash": "8d6aed8cd7c73f73ebc00f971c5cec18f25b9fae020a5dcb1cb560ed03302c2e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_explanation__truncate_explanation.return.truncated_explanation": {"doc_hash": "0d097b8ad62e343100276f50b422c63db1765b239117574c88f49b8feaa0cc38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_by_char_count_": {"doc_hash": "f8dc0b0cca48c44efad41de1a05952e591d0afdf19bbc4e54e76d33329e8ad42"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Utilities_for_assertio_ecu.if_isinstance_s_bytes_.else_.return.s": {"doc_hash": "034accc6129ddc7417b8bc0bcb87354c266aa5793b09a29ca0740492adfc80a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_format_explanation_format_explanation.return.u_n_join_result_": {"doc_hash": "61db0ee02f68e97d071a10a5a7252c0c8c5819a80cf5b9a7ca1d4bcefd331d83"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__split_explanation__split_explanation.return.lines": {"doc_hash": "7fb127cecee6b0a226f21732186259c3b5bea2929b5f6d9b60214ff68b9da8de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__format_lines__format_lines.return.result": {"doc_hash": "5aa881662f51b88ad9ade7e0b03407e7b39f8ea901f591d451cc235d83971046"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Provide_basestring_in_p_isiterable.try_.except_TypeError_.return.False": {"doc_hash": "942640d785962bb630a46a6cbd8fb133af496074e61d4103153f1debd28c677a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_assertrepr_compare_assertrepr_compare.return._summary_explanation": {"doc_hash": "43dca4ea1149cd334346c7a463196c27bdd9de67e6f7ee08de647bec1a25fb25"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text__diff_text.escape_for_readable_diff.return.r": 
{"doc_hash": "1f19d8d1c31c03fdae5354fafa810399d4bfa934c4f6b1ff19917994349f4243"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text.if_isinstance_left_bytes__diff_text.return.explanation": {"doc_hash": "21e0971e34c8e138dfe16094e3532e03236b5af0441d27df07f1a0d6f08bca7a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_verbose__compare_eq_iterable.return.explanation": {"doc_hash": "d4e4115507801eda64522ab29cbcc43e823ce23a0714bd8c6c66d622ed6e5994"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_sequence__compare_eq_set.return.explanation": {"doc_hash": "5881f83dd412fc47b7ae8b4ca02db1604f6735b7f96dfe8fe6898353f551fa8b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_dict__compare_eq_dict.return.explanation": {"doc_hash": "dd62f45137ed346a3bfd211121cc4dc43c0d510401043323dd793827e1420760"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_cls__compare_eq_cls.return.explanation": {"doc_hash": "a51ea36864bbbbb105a29093e0939645a52cc5181e89caee0570cfd4e8b0b664"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__notin_text_": {"doc_hash": "d99d967869e343545a4e8d8a51121c7804b2a2de875dd7fe0b210bd2d9516d91"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py___CACHEDIR_TAG_CONTENT.b_": {"doc_hash": "14c2c0b156574fe15eb18e1e5ff1372c90d6984aba5310a83459a1a1d3077206"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache_Cache.warn._issue_warning_captured_": {"doc_hash": "3663ec2493badf68a2eb595d9e679d0c3b136082d746c6aa31d4df8613572bb5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.makedir_Cache.makedir.return.py_path_local_res_": {"doc_hash": "cf660b1bef414d2d056d9bb915b97aedb49d1c71c4fe3a2cb52a89afb18d1a11"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._getvaluepath_Cache.get.try_.except_ValueError_IOErr.return.default": {"doc_hash": "639c0703b0f1378eac0bcad5b9010885719530f3df765e9b28c4ac6ec0aa01b2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.set_Cache.set.None_1.else_.with_f_.json_dump_value_f_inden": {"doc_hash": "0cdb34a9be500ce0897f7c2a7650b575494a487c26113ea7eaa5f3277c35ff5b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._ensure_supporting_files_Cache._ensure_supporting_files.cachedir_tag_path_write_b": {"doc_hash": "10ff57a7f0ffaa5ded6d1590b5d1bd45a1c6f92698487a86161c8d9aa0db125e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin_LFPlugin.pytest_collectreport.if_passed_.else_.self_lastfailed_report_no": {"doc_hash": "e85a6f6113b982df8fd9d906f88f3cac3053bd67ff35b8b0c33210be5d0e324e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin.pytest_collection_modifyitems_LFPlugin.pytest_sessionfinish.if_saved_lastfailed_se.config_cache_set_cache_l": {"doc_hash": "3edb4e2d5d3aff562398772a6dc1e229ab27c716c5ddc60efab70fcef429dfed"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_NFPlugin_NFPlugin.pytest_sessionfinish.config_cache_set_cache_n": {"doc_hash": "9dc7a4cd9ee0292fc00d03bec4d11977039ef2086eb3911d25b3eca50e3849dd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_addoption_pytest_addoption.None_6": {"doc_hash": "d1e225ba5e7eccd9ffdafd22a3d51a6cf0594a73c2b97618b6e2d25ac2ce75d3"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_cmdline_main_pytest_report_header.if_config_option_verbose_.return._cachedir_format_dis": {"doc_hash": "e1bf0d377450e333538629b973660d5a3f862fe1b17df6511e5cf5801aab0aac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_cacheshow_": {"doc_hash": "63160e6dba2f1f153b1ce0144dfd7ec7a4717fad4550bea90951a36116b4421a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py___pytest_addoption.None_1": {"doc_hash": "4d0829e3939f90dcc78301f5927127057bdec7cb7508743c7621896bff17fd9a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_pytest_load_initial_conftests_pytest_load_initial_conftests.if_outcome_excinfo_is_not.sys_stderr_write_err_": {"doc_hash": "d8cfa8ff6720a7f4f5a546df03a6e6b8d1a039d2d10cfd6404dec4e07687d156"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager_CaptureManager.activate_fixture.if_fixture_is_not_None_.fixture__start_": {"doc_hash": "0040703c23bb47b8eb764cbfd47248d6e3bbca085bcd32343f54bb389d4deb59"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager.deactivate_fixture_CaptureManager.pytest_internalerror.self_stop_global_capturin": {"doc_hash": "18051468c573141b5500bcc7195d7140dfbe01ad092dea532012603f2c584af1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capture_fixtures__ensure_only_one_capture_fixture.if_fixtures_.raise_request_raiseerror_": {"doc_hash": "6bbae1ae5f0c712f899b4e38318bb26408852a508c292531df777ef8eb67e4a5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsys_capsys.with__install_capture_fix.yield_fixture": {"doc_hash": "266b8fb600a515b6de61101d014177ad4411d85b3392fb354cff7c3982b73952"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsysbinary_capsysbinary.with__install_capture_fix.yield_fixture": {"doc_hash": "cca2c7bc23cf24ce2ff188c075b699df1ad3e859066d8b306c232a88e784ff43"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfd_capfd.with__install_capture_fix.yield_fixture": {"doc_hash": "8328465588be70ecb98a6ea7b1fbeb74bf1e344a135ee968be3b84e835a63c9c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfdbinary_capfdbinary.with__install_capture_fix.yield_fixture": {"doc_hash": "63d2247a2255fc2815fe44a69aff62cb82c7b7e93a47d0fbbb481353fce295e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__install_capture_fixture_on_item__install_capture_fixture_on_item.del_request_node__capture": {"doc_hash": "eeed6887bb1d76129c9101092b4385328888f90a1ca59619ad84b862488cc000"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture_CaptureFixture.close.if_self__capture_is_not_N.self._capture.None": {"doc_hash": "ec269f519ec3478eec5df2fceedd48a6a39862badde481cd381c33a59b7d50e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture.readouterr_CaptureFixture.disabled.with_capmanager_global_an.yield": {"doc_hash": "2f2e45628558a626d634abbea08e4fe98368aca3f1f80eaf9c7e48a59585f48f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_safe_text_dupfile_safe_text_dupfile.return.EncodedFile_f_encoding_o": {"doc_hash": "f5eacb4293fe50f086bbdf38c84e7709e007aae2c2f29f01dad0d091a3d531cd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_EncodedFile_CaptureResult.collections_namedtuple_C": {"doc_hash": "3919e1288ae9ae8aa8df67926f6f177b20d3e3cb399c2d1a3c3fe518dcb8260d"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_MultiCapture_NoCapture.__init__.start.done.suspend.resume.lambda_args_None": {"doc_hash": "08b15e74b2d5ddb616fff5bd5de9e040034aa254ef119030a4e36c8b523d564b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCaptureBinary_FDCaptureBinary.writeorg.os_write_self_targetfd_sa": {"doc_hash": "166224ce4c4335635c6a8dfcdb7d5f93377228a041ced882088a66af0e5fc11d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCapture_SysCapture.writeorg.self__old_flush_": {"doc_hash": "e1a8a2532ab14c0d72632b1e2355f79b7c2ba472351b9361dce2bd50a5bf4d8e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_SysCaptureBinary_DontReadFromInput.buffer.if_sys_version_info_3.else_.raise_AttributeError_red": {"doc_hash": "1c1c686501e1548913b40837d8d105b7597a413d01f23f40a69a7c9968f59361"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__colorama_workaround__readline_workaround.if_sys_platform_startswit.try_.except_ImportError_.pass": {"doc_hash": "849d2eeee0133c4ac8a5274140e197da6777a776ce2e8c9056f6167c38311662"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__py36_windowsconsoleio_workaround_": {"doc_hash": "07f7e1b61598ad34a2d22ee2c14d2c5b70330e6ac113bfa0dd272d46d1e5c96b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py___getlocation.return._s_d_fn_lineno_1": {"doc_hash": "9314fa9dc4278b89836e10c7701123d9f5bb093b46a65a2d3d95b6d472911cd0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_num_mock_patch_args_num_mock_patch_args.return.len_patchings_": {"doc_hash": "e894d4bf7a25750a6d25b756a873e9490a40695f4d8e9889ad52c4f461d5a928"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfuncargnames_getfuncargnames.return.arg_names": {"doc_hash": "7d1b0026b55b26c4cc0a4db188e1abb6ac5518c2a738cc4c5725e2c6f85c9bba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_dummy_context_manager_None_3.else_.ascii_escaped.return._translate_non_printable_": {"doc_hash": "e18e0cd56b6ad169414a2a5e4fa1f550b7fbc305c35658e75dedc2de1ef79de1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py__PytestWrapper_get_real_func.return.obj": {"doc_hash": "9d8bf74888b5e706c526aa6e7c4e644111f127d7474521ac5fdfcdb34816dc31"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_get_real_method_get_real_method.return.obj": {"doc_hash": "68383dec8ab0d409351e48e9ff127eff917231a69fac3239205ad029071c03ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfslineno_": {"doc_hash": "416093948d26584da7c5d12d8617444329f2032282eed7f705a9235f8c6364ce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__command_line_options__ConftestImportFailure.__init__.self.excinfo.excinfo": {"doc_hash": "7b7c6b130fe15662574622f6f2546a906a2fb0dcce986118a58dc333d556c8e2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_main_main.try_.except_UsageError_as_e_.return.EXIT_USAGEERROR": {"doc_hash": "4938d5c6eb22b53167a865063490f5d2a298047dde8835afb3f9773f9cceb92d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_cmdline_get_plugin_manager.return.get_config_pluginmanage": {"doc_hash": "05a8e9ced2305d9c4f271003863375701fe70f88c1581e568e65fca582d48bcc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__prepareconfig__prepareconfig.try_.except_BaseException_.raise": {"doc_hash": "8414e72fe54c574b71200e5ac7e291ca482e2e97613d56f6e7fe70b36164241c"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager_PytestPluginManager.addhooks.return.self_add_hookspecs_module": {"doc_hash": "65bbd7749174d064d90b08de49227d7252bd100537950e371896961d6533e265"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookimpl_opts_PytestPluginManager.parse_hookimpl_opts.return.opts": {"doc_hash": "bdbc13b83e0f39e7c02cbe9da61e1808a7c3e8655b6b4256c32639b8f5217e54"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookspec_opts_PytestPluginManager.parse_hookspec_opts.return.opts": {"doc_hash": "c43a5e16500097c4f887327d35f427ab9e881642debb66c064dfa55e1bca66d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.register_PytestPluginManager.register.return.ret": {"doc_hash": "33e4b20a0703e3c62ac6d256688f959f5fed8e3964b9cbb273420cb2dbe889ce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.getplugin_PytestPluginManager.None_3": {"doc_hash": "92784dcdabb68ce09d335d119ba9e57a225a7fb94e7907604d9d1a6b3a0a9253"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._set_initial_conftests_PytestPluginManager._set_initial_conftests.if_not_foundanchor_.self__try_load_conftest_c": {"doc_hash": "3bb7991be78c90d6b3f4620fe150cd8d7db746f98bfcee1dfb8d8f7839551ce5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._try_load_conftest_PytestPluginManager._getconftestmodules.return.clist": {"doc_hash": "24544a941a098446e34dbb849eb859561b5387a9d53a648aefa4658975ec51cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._rget_with_confmod_PytestPluginManager._importconftest.try_.except_KeyError_.return.mod": {"doc_hash": "8ad2bb70d0890b06bde1df6c685d91501d73d3d0b8fd21e7d46cfad8fc841afd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.None_4_PytestPluginManager._import_plugin_specs.for_import_spec_in_plugin.self_import_plugin_import": {"doc_hash": "6c92bcc310185bf2bbf50cea61077823a55b3a782eb6ca1768ddd621afecc7b5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.import_plugin_PytestPluginManager.import_plugin.try_.else_.self_register_mod_modnam": {"doc_hash": "3042d601275cadc9167deb6220c440fed79b0d3a47e9a2372df1d0702c5dd89d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__get_plugin_specs_as_list__get_plugin_specs_as_list.return._": {"doc_hash": "606d0c26f6c7d8e52da9809c6361b460a52f5f8e4f833dbc7b4480634ba40628"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__ensure_removed_sysmodule__iter_rewritable_modules.for_fn_in_package_files_.if_is_simple_module_.elif_is_package_.yield_package_name": {"doc_hash": "4c696f85a8fd73f8cec3bc6a30cdda3c6f12e70a5423509caa20b11ceafbcf38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config_Config.get_terminal_writer.return.self_pluginmanager_get_pl": {"doc_hash": "ea6f2e79aad0d0ff7d4c73191697233d5663f893e0ba957d2bd66a2ab4a732ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.pytest_cmdline_parse_Config.pytest_cmdline_parse.return.self": {"doc_hash": "5cd89f8a51c2b9f9c000234197718235f89533a96694ae4c2a31a43a9e535576"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.notify_exception_Config.notify_exception.if_not_any_res_.for_line_in_str_excrepr_.sys_stderr_flush_": {"doc_hash": "a9293369ec64a8f3ebddc1c7603416fccd2675c1c4180373daa05e8d9dc59abb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.cwd_relative_nodeid_Config.pytest_load_initial_conftests.self_pluginmanager__set_i": {"doc_hash": "52fc5320fb0d1ddb4c4f83d159cd881e47129822f329637298200ecfb6e1f08a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._initini_Config._initini.self._override_ini.ns_override_ini_or_": {"doc_hash": "a7690911b04c1d1575c7946d75bbc9df9d0ebb588813a22cd374e1b01b189a29"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._consider_importhook_Config._consider_importhook._warn_about_missing_asser": {"doc_hash": "21fc2d11a0be42286ea11582c1c6d1111a825fe11de0e9709564d38314da74b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._mark_plugins_for_rewrite_Config._validate_args.return.args": {"doc_hash": "7b6f3f664b35521c569ce282b5b02302fd9d93082b607090745a2ff0e27eb626"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._preparse_Config._preparse.try_.except_ConftestImportFail.if_ns_help_or_ns_version_.else_.raise": {"doc_hash": "509bb0f9308773129a6ccac226c92159865105b3bdbfd631754527bdbf3135a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._checkversion_Config._checkversion.if_minver_.if_parse_version_minver_.raise_pytest_UsageError_": {"doc_hash": "0a9e37771d40cc91b1f97714d328a181c3b36b78411df3252a2380f8b34fee23"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.parse_Config.parse.try_.except_PrintHelp_.pass": {"doc_hash": "34e93555afb523eae5083e13a6e4188f99f8372102ffa9061833ed2aab679089"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.addinivalue_line_Config.getini.try_.except_KeyError_.return.val": {"doc_hash": "0a988dac7230fdd41e231fc444ba733e2bfd752b6d60efd30ee544fcc05dfb48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getini_Config._getini.if_type_pathlist_.else_.return.value": {"doc_hash": "cd4804f218c2a1038aa0e48fefc251b867fd62526605a9cd8128afe43f4c2387"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getconftest_pathlist_Config._getconftest_pathlist.return.values": {"doc_hash": "420963d301e35f80e2e19cc747ebe67a46c180c90b913d5ef31b47a5e1b6a816"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._get_override_ini_value_Config._get_override_ini_value.return.value": {"doc_hash": "57511b6b76f994027b16fad1806b0be66284737e470a825d172137f03e256fc6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.getoption_Config.getvalueorskip.return.self_getoption_name_skip": {"doc_hash": "f69f845cd67fc7fb700a82290aea89b23aab389205e0d38b3a66dc3c7ba35c88"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__assertion_supported__warn_about_missing_assertion.if_not__assertion_support.if_mode_plain_.else_.sys_stderr_write_": {"doc_hash": "c5b880449a324d863b6a3bf493d1950f70e61491a73d30495b2de76e33242eab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_setns_create_terminal_writer.return.tw": {"doc_hash": "6be25b656a145c07598a195f5f0f892a4377e58b8f1d3afaa40b1efc4612aebb"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__strtobool_": {"doc_hash": "6310e675d6bb97cbef3f4a8d630373f591379abf9e274b7511f470293af4a737"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_argparse_Parser.processoption.if_self__processopt_.if_option_dest_.self__processopt_option_": {"doc_hash": "2eab5c430ce2c5eb8a48a9b64a72d7c88d9f44a77658932f309c249e7090c57d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.getgroup_Parser.getgroup.return.group": {"doc_hash": "9f193025753b741e185816a96f82fdc8f9a7500b76981183e1b90a0a28e55e23"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addoption_Parser.addoption.self__anonymous_addoption": {"doc_hash": "05cb9f2f206e9735e1b7660f079e0d2b556700375a55081155905202cf56ffa0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_Parser._getparser.return.optparser": {"doc_hash": "c053c76c796acea4dd901bdb4cf2a8e9a2e4263754f3dca175a3146cc7f427de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_setoption_Parser.parse_known_and_unknown_args.return.optparser_parse_known_arg": {"doc_hash": "ec28a87c619c3f2ff1728781b31eef6f83b725db83c86b0dde1adb42792a7df5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addini_ArgumentError.__str__.if_self_option_id_.else_.return.self_msg": {"doc_hash": "748300f47434ff1174fd4a9e1cf63747d0ae513d472f004802abcf9c14fd85db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument_Argument.names.return.self__short_opts_self__": {"doc_hash": "cfe61128de51ec4f4c857a5993bf41ccf46366bc062f0ddd0a9a8abd7b9cde76"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.attrs_Argument.attrs.return.self__attrs": {"doc_hash": "ae0d92948287c6caee06ede477f19c110a7a9a234ca52e08253483b45f942ae3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument._set_opt_strings_Argument._set_opt_strings.for_opt_in_opts_.if_len_opt_2_.else_.self__long_opts_append_op": {"doc_hash": "fc18978af297d52beca7e2bef373c6ce665b133445fa3d5b84682a745d14c9a4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.__repr___Argument.__repr__.return._Argument_format_": {"doc_hash": "2bc8be04d1992a5000ce2da9f63f9250ed8b14673891aa03ccf7c01efa091c69"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_OptionGroup_OptionGroup._addoption_instance.self_options_append_optio": {"doc_hash": "69eebf1f2a6c8e306f3324c13911618f96caaa3edd727941c4e49f65370d7604"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser_MyOptionParser.error.raise_UsageError_self_for": {"doc_hash": "c0b971c1eeca12d5a2899e4d4b514a9ef27ee8403aad6db844ba3b6645e20e3a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser.parse_args_MyOptionParser.parse_args.return.args": {"doc_hash": "3dd23945898f7977285c54790e735bf51ec113ee4e15d8fd013a84680f2293a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_DropShorterLongHelpFormatter_": {"doc_hash": "b43bcf35aedf4a3102d29d916c33c8e24a72e2cd47308f209ae2d645be8ea214"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/exceptions.py__": {"doc_hash": "6afe8f907d9e487581aca572d6ff460a824e473d7951b6981e236d03997a67b5"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_os_getcfg.return.None_None_None": {"doc_hash": "03a930eb0c03e3cf38e0b9f9cca7bb269547fb869b26702c735c93207546daaa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_common_ancestor_get_common_ancestor.return.common_ancestor": {"doc_hash": "b53cd120e850ba2328815a59aaf512f48f2e834c6776fe2bc6264414c1fd5ebd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_dirs_from_args_get_dirs_from_args.return._get_dir_from_path_path_": {"doc_hash": "22aa5c2f6c775692588289758d88d5bd8600b76bbd13e7fc100cbbfa20984a71"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_determine_setup_": {"doc_hash": "de4aefb27f1188c60300fa389111d8751fc82ac45847b1e73be1c8b4ace64896"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__interactive_debugging__validate_usepdb_cls.return._modname_classname_": {"doc_hash": "de04b86d2a36584584d40987ebfe641486a126c48afd346354c2d826863b61c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_addoption_pytest_addoption.None_2": {"doc_hash": "2f58e9bf1a309b636d0742a1d427ac208fbe56e8cc41c81ba9bacf5a4a1f0517"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__import_pdbcls__import_pdbcls.try_.except_Exception_as_exc_.raise_UsageError_pdbcl": {"doc_hash": "c94049e213fcd43e44bc21ae1a7512295064a1a64108747647e01b796872d30c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_configure_pytest_configure.config__cleanup_append_fi": {"doc_hash": "21d0e5807baec0a3b877dd768ac774d2e284dbf568c9b1fa1222f259e752f3ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytestPDB_pytestPDB.set_trace._pdb_set_trace_frame_": {"doc_hash": "ef15dbcfa9cc1e55bb4b541a68f0866148d48bbae6a49abf4c298bd4f961f067"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_PdbInvoke_PdbTrace.pytest_pyfunc_call.yield": {"doc_hash": "0e95787ca76b61e66c81e329fac7be171182328c25f5219c7ff59ccaa1f2204c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__test_pytest_function__test_pytest_function.pyfuncitem._fixtureinfo.argnames.tuple_new_list_": {"doc_hash": "c4a4d30209fc1215b88eb35ac58f959bd5c42bef5aa5d77bb91c76ad0f01ea20"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__enter_pdb__enter_pdb.return.rep": {"doc_hash": "3579580b2c9a978a73c14639eab61fd5535be82820fae0dea7a1df6ca839fa0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__postmortem_traceback_": {"doc_hash": "67561d213cfa12ab9c125b77f1962d9c551708445ca2fcf1a4bd70e81651f6e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py___PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST._": {"doc_hash": "fc2623dbf29ef21147b07368a5353978ff7c44990fb3d78a75ccada6b14e11ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py_PYTEST_CONFIG_GLOBAL_": {"doc_hash": "ae1e097bc8f1d760cb3db185e7558e6b3b5c449f69231232f7bfa7a70d21581c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__discover_and_run_doct_RUNNER_CLASS.None": {"doc_hash": "9536980551e79ef5d8afbd7e81a4a00a8b3b6983ec721f2e89560c9f7c04d679"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_addoption_pytest_addoption.None_6": {"doc_hash": "2ed62493bcf04b654f137b1651f56688b9d8ca4d808d011550100253d073a01c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_collect_file_MultipleDoctestFailures.__init__.self.failures.failures": 
{"doc_hash": "4d647f4146352aff5f7d35e10ac3eb65636b19ab05b1ba2692a8c80f392be25d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__init_runner_class__get_runner.return.RUNNER_CLASS_": {"doc_hash": "c966bccad6d238df42b05f2050869406f77490e7546670642573da29b9c75f1b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem_DoctestItem._disable_output_capturing_for_darwin.if_capman_.sys_stderr_write_err_": {"doc_hash": "440c7b720283216ae3c6b4a109cda03161a319285a6ed5cedd26f18fd31dd1cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem.repr_failure_DoctestItem.reportinfo.return.self_fspath_self_dtest_l": {"doc_hash": "95b4ea9f2d0f941b58817b5a8b523c69b298c835f4ba535760e31512839312c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_flag_lookup__get_flag_lookup.return.dict_": {"doc_hash": "9b4a0e6383c570d27c724ed586189ca08073e0294be5aaef38794de4af624b3f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_get_optionflags__get_continue_on_failure.return.continue_on_failure": {"doc_hash": "0c651783950d3ee27168fdcf70d29c3cacabcbdc14b4bc1964a2a029b1bf4ed9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestTextfile_DoctestTextfile.collect.if_test_examples_.yield_DoctestItem_test_na": {"doc_hash": "2157ee5b66b6f8d96e36a968daaadba310c57b667169d86b8fcd5a0b595667f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__check_all_skipped__is_mocked.return._": {"doc_hash": "eb1e670043705bb635e6ec851f5c5ea0cb390f596af03be078f6f898b9ad98f0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__patch_unwrap_mock_aware__patch_unwrap_mock_aware.if_real_unwrap_is_None_.else_.try_.finally_.inspect.unwrap.real_unwrap": {"doc_hash": "04d6a49c5388f69447d207bf026579088e677c68bdcc70833e4ed9c46af4efef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule_DoctestModule.collect.MockAwareDocTestFinder._find.with__patch_unwrap_mock_a.doctest_DocTestFinder__fi": {"doc_hash": "fede8e884f86c94da1668fcaa39e5288894a44ae1bd6d15c40670090f53bbf06"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule.collect.if_self_fspath_basename__DoctestModule.collect.for_test_in_finder_find_m.if_test_examples_skip.yield_DoctestItem_test_na": {"doc_hash": "707ef80a1e2dadd2e63a897b83bcd502919301da475cb4defa0bac42f5af8bd3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__setup_fixtures__setup_fixtures.return.fixture_request": {"doc_hash": "7a0610b850b7511f6d3eaa614fb2e050f07ac83087a16b1b0344dd7713589f0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker__get_checker.re": {"doc_hash": "f3fbc13022cbf5c6f0c15f84ab840b58873b2e6318b27c86ae257ba22b1a9453"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker.LiteralsOutputChecker__get_checker.return._get_checker_LiteralsOutp": {"doc_hash": "aecdef74a3012121c85813ef6072a6ed73867f100c829be6ea53528a83b8aad8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_allow_unicode_flag__get_report_choice.return._": {"doc_hash": "baed13b64a7be75b0473ee3e217ce818f0ccc83225bd4eab924ce05ead80d09f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__fix_spoof_python2_": {"doc_hash": "11d1509c639dcd70fd855553f3d00ebc4d714cfbf02ef5e8cebec3a9afc256ff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_from___future___import_ab_get_scope_node.return.node_getparent_cls_": {"doc_hash": 
"9843b2f7b11ef82f5530f7081f94a44fdfab80842b6d3e1c7cae1b67ae1cb5fa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_add_funcarg_pseudo_fixture_def_add_funcarg_pseudo_fixture_def.for_argname_valuelist_in.if_node_and_argname_in_no.else_.if_node_is_not_None_.node__name2pseudofixtured": {"doc_hash": "85d6f195e4de451ea8a7766190eec3dd5f47294d860bdf0708036863153b3dec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_getfixturemarker_get_parametrized_fixture_keys.try_.else_.for_argname_param_index_.yield_key": {"doc_hash": "61c6bc144ddc1b3eea102a8fdeb4869dc2d243b3a086f215633747c56646311a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__algorithm_for_sorting_o_fix_cache_order.for_scopenum_in_range_0_.for_key_in_argkeys_cache_.items_by_argkey_scopenum_": {"doc_hash": "1b7a462d92b1de5b032337b333e9ca8f7ff297ac3729eabd21162c78edfd9bfb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_reorder_items_atscope_reorder_items_atscope.return.items_done": {"doc_hash": "012e1285887e824bab4c35a8b0a541e0df5e331a2e7e991a44fcaafd46c7869f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fillfixtures_get_direct_param_fixture_func.return.request_param": {"doc_hash": "8643877092358385b0abb59ea6023134485589a9e31d2b84bd145a8da2927986"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FuncFixtureInfo_FuncFixtureInfo.prune_dependency_tree.self_names_closure_s": {"doc_hash": "696fcc3939ed6daaac4bea08899706e4a1d1010ef80dd7bf4d8399e19b7c10ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest_FixtureRequest.node.return.self__getscopeitem_self_s": {"doc_hash": "e8196034bb8125fcd20b37e7dc4a01ccb29edd61d770622e6f21da8f2b72817d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._getnextfixturedef_FixtureRequest._getnextfixturedef.return.fixturedefs_index_": {"doc_hash": "398653d40372075b6ff65f71988faf914aba0f97c224131575c157db29e85c7b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest.config_FixtureRequest.getfuncargvalue.return.self_getfixturevalue_argn": {"doc_hash": "47ec18287a18d4da792cb5a81bc656811b4c9ca6d803e234b60e6040256bc062"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._get_active_fixturedef_FixtureRequest._get_fixturestack.while_1_.current.current__parent_request": {"doc_hash": "888d734fde9652a8942d13916d6a54a5179861c0dbd7341c3459851bb3e8911d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._compute_fixture_value_FixtureRequest._compute_fixture_value.None_1.finally_.self__schedule_finalizers": {"doc_hash": "28fb83a305c77abdf8d7981cf8add7151540ed73358ddfc7640ccc9f9c83ff60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._schedule_finalizers_FixtureRequest._check_scope.if_scopemismatch_invoking.fail_": {"doc_hash": "ff31571adf59e64a7d2df259e1b878b8bc94ffe602498238e227a5161a189ff9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._factorytraceback_FixtureRequest.__repr__.return._FixtureRequest_for_r_": {"doc_hash": "6d0ece7c07944225dfc06eca785e309ad037b9658e556a3dac6061d6b2b45cf8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_SubRequest_SubRequest._schedule_finalizers.super_SubRequest_self__": {"doc_hash": "f58428dc883d9b01c848d0fe6f9d80d46bb2c04d895f6e4ee72196d43af77a56"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_scopes_scope2index.try_.except_ValueError_.fail_": {"doc_hash": "e764d9b248ae5914cb4863ffb07541ba0de6d8338d17763ee3a6a0a8ce208e86"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupError_FixtureLookupError.formatrepr.return.FixtureLookupErrorRepr_fs": {"doc_hash": "f9fc902a95671d8adaaab49df27a9581ebadf9235d190da4bf84e6d35bcf852c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupErrorRepr_FixtureLookupErrorRepr.toterminal.tw_line_s_d_self_f": {"doc_hash": "97529f1c025694310a68edca783e06ded71f6bb41c3bfbf7fab20bbbd1efd687"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fail_fixturefunc__teardown_yield_fixture.try_.else_.fail_fixturefunc_": {"doc_hash": "3b2c429173e1bac34c656a8b0f36e0e5b2843bda98b22d8efa962addc4557706"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef_FixtureDef.addfinalizer.self__finalizers_append_f": {"doc_hash": "448cd4fda05ff7b5b8efecf1e655bbefb5e0bbbeb5bd352dcda126a7533883a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.finish_FixtureDef.finish.try_.finally_.self._finalizers._": {"doc_hash": "be2a1cf3c1812b05c6f529801db9453f9d07b6db1f86639251762e126b5d14d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.execute_FixtureDef.__repr__.return._FixtureDef_argname_r_s": {"doc_hash": "1e5c36dd133cd8aa4b57630d7a30735a5332848a01e1ea9823bd402f856ad2cb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_resolve_fixture_function_resolve_fixture_function.return.fixturefunc": {"doc_hash": "f5756575650092e6fcf96bba95f3aa140605fa35959a89cebc4e1cb677e9602b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_pytest_fixture_setup_pytest_fixture_setup.return.result": {"doc_hash": "5b763454aefb25b2bc9a359739123e3740b0174a8c1ee1e044b10296b6045138"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__ensure_immutable_ids_wrap_function_to_error_out_if_called_directly.return.result": {"doc_hash": "118de8fd4461c0d846dab2faec53415d58c2bc5d36f36b4d6d1d009eab6a11d4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureFunctionMarker_FixtureFunctionMarker.__call__.return.function": {"doc_hash": "9fd2d731528b722b00bcc50ef26ec19322ec13956460dc05896717aa51a0f002"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fixture_fixture.return.FixtureFunctionMarker_sco": {"doc_hash": "2931462a420bfa487bb107616a58b7a280450435077f85dd69110dbac01e81dc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_yield_fixture_pytestconfig.return.request_config": {"doc_hash": "e3ec4a60beabaeaad5f825fe8089fddf4ce772aacf81282b6028d497cadac22a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager_FixtureManager.__init__.session_config_pluginmana": {"doc_hash": "481dfefef2d906adda0620e686ede72c4beec1dd6370c7628a7ab0edae12317a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureinfo_FixtureManager.getfixtureinfo.return.FuncFixtureInfo_argnames_": {"doc_hash": "d7bfba419b8e5638323acebdb8e23c6fffeb0ed01f1b24b3c4fdb38580dc1caa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_plugin_registered_FixtureManager._getautousenames.return.autousenames": {"doc_hash": "647b19ee111324cd687d00bc9aacdda7d20b4cdbd536fd1c66c34ccf4634eae8"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureclosure_FixtureManager.getfixtureclosure.return.initialnames_fixturename": {"doc_hash": "a1fa6d068f043d523523cafd434b1c385d4a3c32d067ccedc7b96eeac06a29ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_generate_tests_FixtureManager.pytest_collection_modifyitems.items_reorder_items_": {"doc_hash": "fe66f02d671d226e5a4ef5715c4b5460d8872cd9646b7bab4efdfafdf1f5477c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.parsefactories_FixtureManager.parsefactories.if_autousenames_.self__nodeid_and_autousen": {"doc_hash": "0e63fb0fa77082a9ae355bfb99a0244ae83323a29e4c1f371846bec48cbf24d2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixturedefs_": {"doc_hash": "370f420789967dedf1abb81f15cf2e3c8339c798efb5822efeecde9630fc0cce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/freeze_support.py___": {"doc_hash": "c60a2b6970521860fcf48a9ff6a41f9e87f6fb768c53f99e86405031b983b772"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py__version_info_help_me_HelpAction.__call__.if_getattr_parser__parser.raise_PrintHelp": {"doc_hash": "ee328c12e31db42fd595cb9875343ca57e96ceac4f37082ce7d22b3ffdf6e246"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_addoption_pytest_addoption.None_5": {"doc_hash": "b64d338f119c5fdff489cc6b79f0d3e0efc956a96bbde8bd1a78ddb9575660a5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_cmdline_parse_pytest_cmdline_parse.if_config_option_debug_.config_add_cleanup_unset_": {"doc_hash": "91a3b75a78b717d4dd86f50402a22154a296e2eab4dd6d49ae6dfa264fe1ac3d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showversion_pytest_cmdline_main.if_config_option_version_.elif_config_option_help_.return.0": {"doc_hash": "6809c88acd7c01603274ccd5c1b2cb37dc20249dddbb79a55a6f454801ef1ff8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showhelp_showhelp.return": {"doc_hash": "8ea3a7c4b066f407022a52561db733b58b73188380ecd9e8f00e02433e1c1222"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_conftest_options_getpluginversioninfo.return.lines": {"doc_hash": "91bd05a536b3f1e2e85ccfa6e297c3f4ad81a3deb0c1175d8fa73b710f8c59d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_report_header_": {"doc_hash": "a0cde292fa42761e9baf650bdac997cc7dccbd775041efd7bd2a986e8bbb2fe0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py__hook_specifications_f_pytest_plugin_registered._a_new_pytest_plugin_g": {"doc_hash": "4cec334c19d58de7250b33b56983c3a85dee0da6b04d8683e9667f59f82c3fc4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_addoption_pytest_addoption._register_argparse_styl": {"doc_hash": "5aed31be1c4e6b625ba7ae5f843cfbd6cec5d353b09849a75dc0a0652ad66170"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_configure_pytest_configure._": {"doc_hash": "b9c735fb3d975b075d8f1c9697f982c70bb7787f58e1734c95d1e6fd09143c54"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_4_pytest_cmdline_parse._return_initialized_con": {"doc_hash": "d38f554683412a975fb888212938a1c14b1e1e1aee32b3b7d8c7be8dc4e966ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_preparse_pytest_cmdline_preparse._Deprecated_modif": {"doc_hash": 
"e554b9a58cff0aad7110dcbcbab910655387576e248e28fa7b2bc2055ce94f1f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_main_pytest_load_initial_conftests._implements_the_loadin": {"doc_hash": "6b749eed1cf619c88626ca65e386f17f7483cfec96c6494d4cf4edde7a901633"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_8_pytest_make_collect_report._perform_collector_c": {"doc_hash": "62322c7cc32afc6fd3c6dd8f6ac2420ed376a926bb951e02fb18fb297e004cc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_12_pytest_pycollect_makemodule._return_a_Module_colle": {"doc_hash": "989040818b200f3dba16b94c4cf535463af93cde2923d22582a70038fff2de8e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_pycollect_makeitem_pytest_generate_tests._generate_multiple_p": {"doc_hash": "c66c66e77232b65e012a8e6d793f728057c0b278bfd3e6e25636a1f3a97d229d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_make_parametrize_id_pytest_itemstart._Deprecated_use_p": {"doc_hash": "e45ffd58d60246162d175e57b8d704437c4c3e52f1d6ec1e3b7bee8a91f0ad59"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_protocol_pytest_runtest_protocol._implements_the_runtes": {"doc_hash": "424de0d071839cd21c6135fbf72f4016c57f08d6fd5ee769fbb732e1cb63df10"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_logstart_pytest_runtest_logreport._process_a_test_setup_": {"doc_hash": "100d60938f27d5aebeddcfe7fc5ef836efc3f7522c27fd974352993e52e66bb7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_to_serializable_pytest_report_to_serializable._": {"doc_hash": "0a9cc40d5667dadea997f4d5e08cdc720d64b243329b910be7b4a969abbee4ca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_from_serializable_pytest_report_from_serializable._": {"doc_hash": "77bef2776f70cbc144287d8d1e33398cf11f0d4ef22782f980c37ef890ea4192"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_18_pytest_unconfigure._called_before_test_pr": {"doc_hash": "7d3db1ff788ba021868a3da6405681e0be599303c3f24634fdcce008b34305eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_24_pytest_assertrepr_compare._return_explanation_for": {"doc_hash": "e5d7368a82aa4a426150aa2bddf453ed3ea1793517d03e933af9d7d550f00d13"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_27_pytest_report_header._return_a_string_or_li": {"doc_hash": "09a1ed5808da74f2a672ae23dbc690190383e546e539bba15f199475d79eb5ff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_collectionfinish_pytest_report_collectionfinish._": {"doc_hash": "73ec5b29e418988fb4f3d72d641d17a8e059f9018a9fde3a29ed80be4aafb155"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_teststatus_pytest_terminal_summary._Add_a_section_to_termi": {"doc_hash": "1aaeb5b598975bf6e15098484b3e587f7e5b021fd3d0d74e991861295a80d891"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_logwarning_pytest_logwarning._": {"doc_hash": "f20bdb8753027e7d01bb271aa890fb89d00e5fdfad8d1b3b3618296e29f47e1e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_warning_captured_pytest_warning_captured._": {"doc_hash": "00e109447590da93e332cb9ed4a46af055a6cc0c76444602cce97e710a832ec5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_30_": {"doc_hash": 
"dd7c5d207927b114a23d35cfb1ec156bdff78d4895c8eb09d3a3bd5bf1f6778c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py___families_xunit2_fami": {"doc_hash": "e94ad447de9eb30509cd77c2280512e7f561bc5607455b1aa62aabca02805f52"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter__NodeReporter.make_properties_node.return._": {"doc_hash": "56e2e966e311badcf536dcf5b19a63a92a493ab4177ae7d888383e26f8cd6c04"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.record_testreport__NodeReporter._add_simple.self_append_node_": {"doc_hash": "2864f6df51a6b48d5b00c33a3242944d903510a6772f73604f97e35f06dbeb08"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.write_captured_output__NodeReporter.write_captured_output.None_1.if_content_.self_append_tag_bin_xml_e": {"doc_hash": "79f8e3628005c10447221c8756a8d0f43b3006fe2644f2e8c6ba9525fdfac847"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_pass__NodeReporter.append_failure.if_hasattr_report_wasxf.else_.self_append_fail_": {"doc_hash": "a673940dcce83d266d4cd9ac6289121296de174d790e609d760c658eb6ea0f35"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_collect_error__NodeReporter.append_error.self__add_simple_Junit_er": {"doc_hash": "b0ae9dd3ddbb53bb7adebc9ab8ebea5464c06312a37e127b50ea705c0ada7210"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_skipped__NodeReporter.finalize.self.to_xml.lambda_py_xml_raw_data_": {"doc_hash": "1f6e4293997217a6842ef1b277df1f2421ced08274c93384897244aaf26c56c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__warn_incompatibility_with_xunit2__warn_incompatibility_with_xunit2.if_xml_is_not_None_and_xm.request_node_warn_": {"doc_hash": "315a11006e4ab766ae9b1bc4bcad3069786d12efdb7811027295e2c506297dcc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_property_record_property.return.append_property": {"doc_hash": "65df067a6b8005037fbcb95e3e72f13b7f956e6bff58af4114dcbd3f9681d46a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_addoption_pytest_addoption.None_5": {"doc_hash": "5fb33380b648601f4a0fbbdc8528f74c1d491ed588da6a3f855fe66101aa8002"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_configure_pytest_configure.if_xmlpath_and_not_hasatt.config_pluginmanager_regi": {"doc_hash": "e4ec43a8eaa0699057345706ab3a602610c47dccf3bedaf58fbcb7ae0f540902"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_unconfigure_mangle_test_address.return.names": {"doc_hash": "00746c378700b7ef1bc3866521959bd8a6ab19a1c8873902b8f39788f806ce48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML_LogXML.finalize.if_reporter_is_not_None_.reporter_finalize_": {"doc_hash": "d09addef4f742873e31c31a0e6bc0ca97703d1a72dac4253e869035708d54035"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.node_reporter_LogXML._opentestcase.return.reporter": {"doc_hash": "99b5b596c9a132658d60b7de0d72546b33c6747f4258533855bd6300b7f7675a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_runtest_logreport_LogXML.pytest_runtest_logreport.if_report_when_teardo.if_close_report_.self_open_reports_remove_": {"doc_hash": "76cc1d4df99c9d038e9dcfef7e1a3e8ec9a94055d4ce906c58ffe921d32786f6"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.update_testcase_duration_LogXML.pytest_sessionstart.self.suite_start_time.time_time_": {"doc_hash": "cc16658a9ba9f86c64ee557db9008c781c364194ce4cd987618c312f83e18cc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_sessionfinish_LogXML.pytest_sessionfinish.logfile_close_": {"doc_hash": "6df4e03db7dca94f82bfb11bc142258a9c7f5940d36fa93fb81cba65096e2a1c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_terminal_summary_": {"doc_hash": "14839e43c47d8bac968819de07027f02c8eb754adcc8d8a9baa16f9c239bc943"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__Access_and_control_lo_DEFAULT_LOG_DATE_FORMAT._H_M_S_": {"doc_hash": "05e4fdc75930618c94b65abb5a935316eb0fdc3a912ec03170c9fe4956a22a6f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_ColoredLevelFormatter_get_option_ini.for_name_in_names_.if_ret_.return.ret": {"doc_hash": "0b76551b9d8b1d9e4291f8eeb111edff11b7b8753892172cf30feeb39a1e2a6f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_pytest_addoption_pytest_addoption.None_11": {"doc_hash": "59c08d3fd9c08238fed05ac336ac048ff816a0ad8af7e3d14fba627667161630"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_catching_logs_catching_logs.try_.finally_.if_add_new_handler_.root_logger_removeHandler": {"doc_hash": "e9f550f1d93be31daad3112c4c69fedf1a9de53e971a4e6cab8add6455ea0e2d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureHandler_LogCaptureHandler.reset.self.stream.py_io_TextIO_": {"doc_hash": "69630ced0a3951854dbf19882d5e118c6cdef4188254600fbdfe598af5bd43c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture_LogCaptureFixture.handler.return.self__item_catch_log_hand": {"doc_hash": "9dd0400484b40dfb49e1759a6ff8e4bd351b52241d4690a576bf41c162b88c3c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.get_records_LogCaptureFixture.get_records.if_handler_.else_.return._": {"doc_hash": "8258f185315f3b5792e08ad8c6f79e3e96282ff7e7b43041586228c95155c26e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.text_LogCaptureFixture.record_tuples.return._r_name_r_levelno_r_ge": {"doc_hash": "188c68b99d0713fc7b0e3eb04bd0ea7db3f1e945ecedcc3b02ff145020f87a35"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.messages_LogCaptureFixture.messages.return._r_getMessage_for_r_in_": {"doc_hash": "7047f3e6a440e2cf71e3da225f06cf8d112cdbc15016b809ccb41426b87ac17c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.clear_LogCaptureFixture.set_level.logger_setLevel_level_": {"doc_hash": "1faa9b7236a3efa106c6c2a4ae53d8f134edb21d28dcf2b64edb36ce81e429c6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.at_level_LogCaptureFixture.at_level.try_.finally_.logger_setLevel_orig_leve": {"doc_hash": "7700c37ce8b01a6d85643c916ef7a0ae16631bd127d6bb8787a8f90785cd4740"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_caplog_caplog.result__finalize_": {"doc_hash": "7be0a652fff7d343f39eefa712af5e78decbecfef1467763da4a48e371db839e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_get_actual_log_level_pytest_configure.config_pluginmanager_regi": {"doc_hash": "f5c218fbe8f4de549555515c67f20e6ecb4d2facda41fd79b1fbc1716453bcdb"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin_LoggingPlugin.__init__.None_2.self__setup_cli_logging_": {"doc_hash": "4016a5a95d7c77d046902f57c6afec1c2ad2aea7c9f8340d5abf6c7dae4ad843"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._setup_cli_logging_LoggingPlugin._setup_cli_logging.self.live_logs_context.lambda_catching_logs_": {"doc_hash": "75efd059493ef14e8d8506e696fd3d62b957736330bf8ce098652541316256d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.set_log_path_LoggingPlugin.set_log_path.self_log_file_handler_set": {"doc_hash": "4c43a5e02d0f39b4bde70ecf800a4713e782b08527f802d98d922f1d88a32cef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._log_cli_enabled_LoggingPlugin._runtest_for.with_self__runtest_for_ma.if_self_log_file_handler_.else_.yield": {"doc_hash": "b73e8ad5353547e7cdc01db507ae9178ffaf51d87f848591772b501475f90021"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._runtest_for_main_LoggingPlugin._runtest_for_main.with_catching_logs_.if_self_print_logs_.item_add_report_section_w": {"doc_hash": "b7d887b8c50831e6596f7b675950cca80ca080437e73196d5d351579dd090078"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_runtest_setup_LoggingPlugin.pytest_runtest_logreport.with_self__runtest_for_No.yield": {"doc_hash": "2cd895baf92e30c3b66be555b5f3a72a2e4cccce4b9e08d82c180b3fef0f6994"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionfinish_LoggingPlugin.pytest_sessionfinish.with_self_live_logs_conte.if_self_log_file_handler_.else_.yield": {"doc_hash": "85d62e294bfd26aa274ee0877cd68421f6c19381990519248dfc96d765a27f71"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionstart_LoggingPlugin.pytest_runtestloop.with_self_live_logs_conte.if_self_log_file_handler_.else_._run_all_the_tests": {"doc_hash": "8a75bde645a305cbc4d1d009a08c068659ebeea9abf6a93b96c8ede16dfcadd8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler__LiveLoggingStreamHandler.set_when.if_when_start_.self._test_outcome_written.False": {"doc_hash": "5a4010f0a5dbf57cba4a624f2550a928502e86c444827620c58ebe465888093e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler.emit_": {"doc_hash": "4fc352406bd726132bbe7949e259f9828fa80aa09dc1216d515051074112a2c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__core_implementation_o_EXIT_NOTESTSCOLLECTED.5": {"doc_hash": "d2f877bf7e56383ac9e69034578bf02b9f605a0fd412a2e07a868cbd0f4bb4d1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__ConfigDeprecated_pytest_configure._compatibility": {"doc_hash": "e43d6e56501f7e8a35e2e3a8cda25b74686f43bb62ff2b8a60d459a5cd8f730c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_wrap_session_wrap_session.return.session_exitstatus": {"doc_hash": "66586a00a56a8d824334be998716acb4ad6ffe81ac87fe3e05ab3fe885182042"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_cmdline_main_pytest_runtestloop.return.True": {"doc_hash": "31e245ff7d0607b85d8bfa591bf7ddc4a46fc2eadd667f2f5adc3b5cf356d695"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__in_venv__in_venv.return.any_fname_basename_in_ac": {"doc_hash": "3132bd4fe15d5bfe39b32d72513ba219df1d925d68d82bf7d3a040e0963a4275"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_ignore_collect_pytest_collection_modifyitems.if_deselected_.items_remaining": {"doc_hash": "6cb1d503ffa0332407932f57f3df562f7c50ba424cde30bccd99e93ee2a0cc00"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__patched_find_module__patched_find_module.if_six_PY2_python_3_4.else_.yield": {"doc_hash": "c420f46b356bf168e59b27315256cde9edfe893f4e9c76e21ff0d8f9d1757d29"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_FSHookProxy__bestrelpath_cache.__missing__.return.r": {"doc_hash": "8c32facc1ed32d6e7cd65e73f97c693e6b71ca47ce87a9d63d05a04d002ace60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session_Session.isinitpath.return.path_in_self__initialpath": {"doc_hash": "df28563a4b5cb9b33a9a9676bdd8152b6b8ebf36d1c3e672080dd1ddcc9e0fe0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session.gethookproxy_Session.perform_collect.return.items": {"doc_hash": "ae02ccadc3fdfca25e86fabdf736a20131c647be5c11b24b385030b3031851e9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._perform_collect_Session.collect.for_initialpart_in_self__.None_1": {"doc_hash": "6bc195564d723ba73fc68a9a120e8a963a0663fbd0fa85fb25bb3d57c933d6e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collect_Session._collect.if_argpath_check_dir_1_.else_.for_y_in_m_.yield_y": {"doc_hash": "7857fdd0ebe7b4747338f546dce3195b2b2933f91a71d638a5831146806d0c85"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collectfile_Session._collectfile.return.ihook_pytest_collect_file": {"doc_hash": "ddfc105f138630969ce9648f9d98b5d884f16b6644bbd21e6f6f9cb618e279a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._recurse_Session.if_six_PY2_.else_._visit_filter.return.f_check_file_1_": {"doc_hash": "80b242b917363340401a6befb8cfae5e3818c03e70b5fb107efd8751d3ba4b7f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._tryconvertpyarg_Session._tryconvertpyarg.return.path": {"doc_hash": "4d96555950f82744a9f0346023ed5119eb79d734d402683b6af1d2973d498317"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._parsearg_Session.matchnodes.return.nodes": {"doc_hash": "d811fc22496accf04f649a32274db87a2037a7b5d09cfd005c1740ef27ef2f64"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._matchnodes_": {"doc_hash": "20afa07dcc02a49395c57aa6326e62207ecb80bc0d1ff7515b8bf9e5b635dc75"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py__generic_mechanism_for___all__._Mark_MarkDecorator_": {"doc_hash": "289506fa15dd97dbbc3e3578471c3fc49acab4bf65f7b57599d9dd0a86018e60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_param_param.return.ParameterSet_param_value": {"doc_hash": "6972c7252b128b0bac101691168a1a02a35ba308ec1bb5b9b31fde1dba2b7810"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_addoption_pytest_addoption.parser_addini_EMPTY_PARAM": {"doc_hash": "53326c60f952770538da000381be9d30ad07df8c13c40a2d60074c90262897b9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_cmdline_main_pytest_cmdline_main.tryfirst.True": {"doc_hash": "52c30252427903bd028ec13b7c8a2df231623bc36910d6be061ff31b8c019e6f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_keyword_deselect_by_keyword.if_deselected_.items_remaining": {"doc_hash": 
"63ac5c65a18b894b0c9e605120cf6edb1a0d76facaf6cecbf936cc5ebc4fec57"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_mark_": {"doc_hash": "55c0c23062c275b4d345dd96fa1ddcb1e22d9e910e35f05c1d5bda37a26c9ca0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_os_MarkEvaluator.invalidraise.return.not_isinstance_exc_raise": {"doc_hash": "a3ecda20f8d8fd507d10da07cd727eefd32a1b2e86fcb985dc34ab735f668d0e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator.istrue_MarkEvaluator._getglobals.return.d": {"doc_hash": "7e4bb39ded96c1b9882a1bc094f349dc542282960b4d70b51021e3725a71351a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator._istrue_": {"doc_hash": "49d13508be8503b8be6459a9a7917595a13107eb751b1543e86f2151bce88a7e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py___MarkMapping.__getitem__.return.name_in_self_own_mark_nam": {"doc_hash": "1270e4edbe2a437dc1690918b3678d586a079a1e0c71699b7afa5b8e14c10782"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_KeywordMapping_matchmark.try_.except_SyntaxError_as_e_.raise_SyntaxError_str_e_": {"doc_hash": "c6bc12ddc2d131eda8c4a78126ef800554e7b7a2351796eb7ecc20e7f937ef01"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_matchkeyword_": {"doc_hash": "34c58dcfa846368df5d699dfe8b66d6c75ea1fea4596012fdcc9f145d06c4549"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_inspect_istestfunc.return._": {"doc_hash": "d5dde5f3bab1fff08efef752998a33999de107370e643038478a6f91d770e33e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_empty_parameterset_mark_get_empty_parameterset_mark.return.mark_reason_reason_": {"doc_hash": "2662f03b06323f4bc3a5d1982d3aa95384f16ecfb264fabe9a945e6c0fe95801"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet_ParameterSet.param.return.cls_values_marks_id__": {"doc_hash": "a7d10a0f2c0b0bd800cd3705859f2b1613b076a6d7fc5ae52cf622046f689684"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet.extract_from_ParameterSet.extract_from.if_force_tuple_.else_.return.cls_parameterset_marks_": {"doc_hash": "1539efa2436826d6f8d3f628ee6a0d8e12a2f8861c43ba40627817fc6d720ce6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet._for_parametrize_ParameterSet._for_parametrize.return.argnames_parameters": {"doc_hash": "8fe8b82c6c84d7bf88ede7c1302b7d51b272cc25c07baa22b40fd66ce5c17567"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_Mark_Mark.combined_with.return.Mark_": {"doc_hash": "240311800d2c3bcc0134feb08fdf39da38b1e2d997cba40292c722628d9c991c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkDecorator_MarkDecorator.__call__.return.self_with_args_args_k": {"doc_hash": "0413336e4e3143f566b8f61beecee29a1fff5125883b2c8daca32042a989f27c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_unpacked_marks_store_mark.obj.pytestmark.get_unpacked_marks_obj_": {"doc_hash": "2086f8a6998496c24e3bd1220a2d2d47ec02ab2e67b70789eee9b1cb6dee1bc3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkGenerator_MarkGenerator.__getattr__.return.MarkDecorator_Mark_name_": {"doc_hash": "1becc98635d35fb5b0f815a69cb90718f7097df9d7a22f7fb1a6ca88cd7f6e9c"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MARK_GEN_NodeKeywords.__repr__.return._NodeKeywords_for_node_": {"doc_hash": "402d3267784c2109a6a91bc5232e652be075d354b50a2adc9d2b17887a8a7ba7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_NodeMarkers_": {"doc_hash": "25c5dfbd79f208ba9961b8b666c2ef5241d1314d3ddfe44876b53c8e13547aa0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py__monkeypatching_and_mo_monkeypatch.mpatch_undo_": {"doc_hash": "07f2ae3ce53bb13e215babd6fea9cf2d9aa0df45ac9644c5f96daf7a5fedd073"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_resolve_resolve.return.found": {"doc_hash": "6f4b055652c239a38e222889327396bc980840e7b85aeeb2f2d5dbf9866a6b33"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_annotated_getattr_notset.Notset_": {"doc_hash": "4d20e5b2c421022bf3a620c29de976a8936c9272512ed426c9dd6cc22e4b4cd1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch_MonkeyPatch.context.try_.finally_.m_undo_": {"doc_hash": "877df2c9860c964e2c14ae41022fb07fca094b635dea8fe50b4d139c3c024048"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setattr_MonkeyPatch.setattr.setattr_target_name_val": {"doc_hash": "935466c853194c4d593cabf34b605764e41d5b5e82f15ca6c39873c807e23efd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.delattr_MonkeyPatch.delattr.if_not_hasattr_target_na.else_.delattr_target_name_": {"doc_hash": "39d319e167ca4d82a3c6c7530090661a63453a214340a920ae217ee131cdf4d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setitem_MonkeyPatch._warn_if_env_name_is_not_str.if_six_PY2_and_not_isinst.warnings_warn_": {"doc_hash": "dd307138c3bf4d6356a624eab14513cd67b8406f3e664611624be55cd2fdced3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setenv_MonkeyPatch.delenv.self_delitem_os_environ_": {"doc_hash": "16c5272d1d161f8e5ba0f21b6b07a570047a3979f150226d140a392bb64c5892"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.syspath_prepend_MonkeyPatch.chdir.if_hasattr_path_chdir_.else_.os_chdir_path_": {"doc_hash": "280ea66fa4ef4c4e2d60cb06d059688f10aa12584558e621be7bd091c4ac2c88"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.undo_": {"doc_hash": "7ea7e5a2625bb8cff5794ee5787fd54be9d385057389a0c7b3ebd204c244eeb7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_from___future___import_ab__splitnode.return.parts": {"doc_hash": "0350ae859e90ced86513fbe58b9fee8006dbeeb05931174513ca760e098b2de0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_ischildnode_ischildnode.return.node_parts_len_base_par": {"doc_hash": "8dc7d141b8ea2d9301f148cae1e6940be8e5588db4e0bb34e29d5f2bb7fd1e51"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node_Node.__repr__.return._s_s_self___class": {"doc_hash": "d72d06f5e1785714c3542f923de291ccb367049940612e5a3c9326f72da7baff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.warn_Node.warn.warnings_warn_explicit_": {"doc_hash": "c36c567af73a41272d57faeb3a2c91b46ff6f9ed49b4f412d6b57b0155ab37ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._methods_for_ordering_no_Node.listchain.return.chain": {"doc_hash": "cf91fbe1911868b701f3f3dd3fe3f607074af029fd30c60dfe1d250609d39e43"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.add_marker_Node.add_marker.if_append_.else_.self_own_markers_insert_0": {"doc_hash": "0b7036b6f6b9247fe6e517d5ecc40b925e0ee6d30baa6fe8628d07f178a8e6db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.iter_markers_Node._prunetraceback.pass": {"doc_hash": "f633f4dd246f08a37268f67408284ae2ccd4d25497866ecd609c41223f1d02de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._repr_failure_py_Node.repr_failure._repr_failure_py": {"doc_hash": "31414267bab6d576de58383f687b7ff6fa55338d40de3b4d24a506a9d26de7ac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_get_fslocation_from_item_get_fslocation_from_item.return.getattr_item_fspath_": {"doc_hash": "55475d43b4e19c9c05b5d5f0604c3a2def3e10b5b906d3e583ab8682e6a2e670"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Collector_Collector._prunetraceback.if_hasattr_self_fspath_.excinfo.traceback.ntraceback_filter_": {"doc_hash": "47a150edc9320f12ab64946692973ef10796f801a8ae44a8303857cea628a9b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py__check_initialpaths_for_relpath_FSCollector.__init__.super_FSCollector_self_": {"doc_hash": "917687c848835c363f50fa0f47b293e7f3c915bd01126dd986f8db504053315d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_File_Item.__init__.self.user_properties._": {"doc_hash": "1d3d6e65613770307117be52a19d695f7c6d46eadd456a59de2c24b181954299"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Item.add_report_section_": {"doc_hash": "1e5e2526985c4c542a406c27e899db1f811aaa59e049d4dbb108122fc33a59e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nose.py__run_test_suites_writt_": {"doc_hash": "d51f2119475a4a15293c5e810d5b416e2b0fe3ffa0b86306588ce73834700661"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py___OutcomeException.__str__.__repr__": {"doc_hash": "7a6740e75867ebcf2a1e7ff3f1155d1128818fea61b7b136f8031f30a60d98b9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_TEST_OUTCOME_exit.Exception.Exit": {"doc_hash": "07c66fb35f45e4d0bf06d1ecd017ddbbde6223b79ef1b4e755e0357e69140ed7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip_skip.raise_Skipped_msg_msg_al": {"doc_hash": "e0065c095abd4355749180ccdba01bb5bb1d60e0503abbaef55ae8ac628673ad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip.Exception_xfail.Exception.XFailed": {"doc_hash": "f11ccce01e85c4769e28dcaeffe399bc35545467a42222bae51fe3a623df7426"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_importorskip_": {"doc_hash": "af890721ae343face16fa12bb9e8fd0918e0be77f9be8dc883191e82017ca48c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py__submit_failure_or_tes_pytest_addoption.group__addoption_": {"doc_hash": "44f16cd48c0192e59cf56d85a5b912b8cc0da0754180f3a7a426dc2ba4292f3e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_configure_pytest_configure.if_config_option_pastebin.if_tr_is_not_None_.tr._tw.write.tee_write": {"doc_hash": "daccc04c064d718d8e5aa38dd760f88550bd9d43f382f91d00c4fd684959fa93"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_unconfigure_pytest_unconfigure.if_hasattr_config__past.tr_write_line_pastebin_s": {"doc_hash": "b31c22a56ea9da90024921a49228046dabd9cc990a172b32ecca8a71dee7bc1d"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_create_new_paste_create_new_paste.if_m_.else_.return._bad_response_respon": {"doc_hash": "a1e09fb3364493a2a2afb4419b1219504b5bc302075258ca766a925ccea8af9d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_terminal_summary_": {"doc_hash": "a18ac07231c2acae12aa6f662de23085c5e985cdf1e17f91f60e214c03650649"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_atexit_if_six_PY2_.else_._max.max": {"doc_hash": "4edf2280a90a6f4af0581f3f818793592838ce79d0e91dfcf475cad2ccbf3cdd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py__force_symlink__force_symlink.None_1.except_Exception_.pass": {"doc_hash": "2c9092af07556a119a0a7460270dc874a4def42c2be27c25d6980b45d6d78ddf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_make_numbered_dir.for_i_in_range_10_.else_.raise_EnvironmentError_": {"doc_hash": "14f40887542d7baccebf73ac420a1d8d01f9fbb2ba0e7802720f571dc7b9449a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_create_cleanup_lock_register_cleanup_lock_removal.return.register_cleanup_on_exit_": {"doc_hash": "d41f5877e44bc985500396bdd894ba7636cebeadcf646a023dddcdd6b0ce28a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_maybe_delete_a_numbered_dir_maybe_delete_a_numbered_dir.try_.finally_.if_lock_path_is_not_None_.try_.except_OSError_IOError_.pass": {"doc_hash": "21d77c2d9245cf8ec209705376e4ae2fb5d581415aa5bf03020f8a90d1817463"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_ensure_deletable_cleanup_numbered_dir.for_path_in_root_glob_ga.try_cleanup_path_conside": {"doc_hash": "e3adb38b8c5a0debfe4d221944f2a227a87ffb688fbdc88d280b3793604e61b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_with_cleanup_resolve_from_str.if_isabs_input_.else_.return.root_joinpath_input_": {"doc_hash": "79378baed4e79a21a65b682638601def617904be0af292cd79a9543d7057f5cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_fnmatch_ex_": {"doc_hash": "b4c0883074da04626e320c02f1a5a19cbf16470fd4fbe77c3452959ceff3696f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__disabled_by_default__IGNORE_PAM._filenames_added_when": {"doc_hash": "e0ca5eebfb5bf56fb16bce84c95fe6cd0dd43d57d8fabe45bc56147e0e706a14"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_addoption_pytest_addoption.parser_addini_": {"doc_hash": "b27a62a335dcbf39b7e0523e21d248593fcedbc8545c4ee146419cb4ea8ffd4c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_configure_raise_on_kwargs.if_kwargs_pragma_no_.raise_TypeError_": {"doc_hash": "5be2590d7d83884249e5cd8875060e9db0c4892560806a71f9390c07facceb45"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker_LsofFdLeakChecker._exec_lsof.with_open_os_devnull_wb.return.subprocess_check_output_": {"doc_hash": "cf36035ef5b10e178360c5bda1f5c7de8b012b83ad2c80287abe992fb6ba8d99"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker._parse_lsof_output_LsofFdLeakChecker.matching_platform.try_.else_.return.True": {"doc_hash": "9b8317bb33c98c91eb1eb749b7f2f894918c39bbe4ad5278d76d1fe9e1827c4c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker.pytest_runtest_protocol_LsofFdLeakChecker.pytest_runtest_protocol.if_leaked_files_.item_warn_pytest_PytestWa": {"doc_hash": 
"c618ba33ffbd55a61d15f129d2e0028ac71b0b3833683b66bee68102e2449307"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__XXX_copied_from_execnet_ParsedCall.__repr__.return._ParsedCall_r_r_": {"doc_hash": "d80de3cf2d207497c81ce0159cb5856dcb5a9bd3bcf5e8497629c2b221683b38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder_HookRecorder.getcalls.return._call_for_call_in_self_ca": {"doc_hash": "731077326a604e7870be0fcc16fb5d3461d81d7123a99bcce40e8562eca0c670"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.assert_contains_HookRecorder.assert_contains.while_entries_.for_ind_call_in_enumerat.else_.pytest_fail_could_not_fi": {"doc_hash": "e919e2a7caf990adb49c47f40ecafa1a32986105c12ac9f634c7933c8d9ddce6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.popcall_HookRecorder.getreports.return._x_report_for_x_in_self_g": {"doc_hash": "fa7e85dd6822af46eb654bff753ed60567c1bc899644618915b152c54e6d8bf7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.matchreport_HookRecorder.matchreport.return.values_0_": {"doc_hash": "72f9d1f14ff5959a7094bd52a988936bddfa8786bddbb3d2d8884f8b7d58244e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.getfailures_HookRecorder.clear.self_calls_": {"doc_hash": "636c98494c0234d2a6b02ed2a965448eb2f780ac2cce8401142aee3b5a8b4665"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_linecomp_rex_outcome.re_compile_r_d_w_": {"doc_hash": "df099dfd349b3f7ce8a0f53b82f141a20031cc53488c3b2e0ac79ea4ca903e8f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult_RunResult.parseoutcomes.raise_ValueError_Pytest_": {"doc_hash": "cd7d38174e1e5cadb0583c2c61400006da39fae19a29c5fe7217c79215a9e538"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult.assert_outcomes_RunResult.assert_outcomes.assert_obtained_expect": {"doc_hash": "0da0c5a98908aac086c90da251232b4dd868c064c6205647396eed9031a84247"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_CwdSnapshot_SysPathsSnapshot.restore.sys_path_sys_meta_pat": {"doc_hash": "05f0fdb474c0e676b9d54c94b947c02699d1d706a2449199f77ea6d58a9ae566"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir_Testdir.chdir.self_tmpdir_chdir_": {"doc_hash": "af1f269fe1500d1c6d7775c5479390a7ba9304f54417d8703d6606e10f5a7b78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._makefile_Testdir._makefile.return.ret": {"doc_hash": "fdc5b1fa42378bea4583c61b1c7c528f610b8e89e7dbd50ee3d1fbcf9d84079a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makefile_Testdir.makefile.return.self__makefile_ext_args_": {"doc_hash": "06d16565348ec3f38ed2213fb6a99766f5e5eaef961d31ce2ed46abafd08c9f6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makeconftest_Testdir.mkpydir.return.p": {"doc_hash": "5af35f83c5f5749acee8b73de05bc54eff460d0e63c50b8fb59672841960b887"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.copy_example_Testdir.copy_example.if_example_path_isdir_a.else_.raise_LookupError_": {"doc_hash": "196e874081477da1dc8ff756edc21514c2afdf11d7297704520252135b01f054"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.Session_Testdir.getnode.return.res": {"doc_hash": "94592ed7115ca014c00edf1fd87287644481b30136eef9cf2f813131b9767544"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getpathnode_Testdir.getpathnode.return.res": {"doc_hash": "3bc953f63673c4beb2bb1fc3c9aa6bc6e1e383d8c6136bc89fbf057265547740"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.genitems_Testdir.runitem.return.runner_item_": {"doc_hash": "44a4a2aebb798b94fce34e5b87a947b316de0d5a7e84a7cb6e2833fece04aa0c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_runsource_Testdir.inline_runsource.return.self_inline_run_values_": {"doc_hash": "f1a9b9356203f7df54304493c9cc6a933ed562e6ddb6d4b8f08af27daa6a2ee0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_genitems_Testdir.inline_genitems.return.items_rec": {"doc_hash": "55045869dcb14930b05f2152748bc5913a8c163d2e7f0cbeb17adde71b9b8571"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_run_Testdir.inline_run.try_.finally_.for_finalizer_in_finalize.finalizer_": {"doc_hash": "d7032e735fbdd1300fe03ec83956bce695ab6361742c32ece7d9c6441374267e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_inprocess_Testdir.runpytest_inprocess.return.res": {"doc_hash": "8c1b521038e426b48e2ddc635905faf87c2eaec28e421acb5f3ede034840de7f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_Testdir._ensure_basetemp.return.args": {"doc_hash": "7ba5d999ad4dad8a26a0ddaa4946f46101112103f1f1e1be6459a9b90aec3224"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfig_Testdir.parseconfig.return.config": {"doc_hash": "88889976b8715f385f07d0975a3b8ef6ba3b1d067ad2c983179dad34d892f7eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfigure_Testdir.getitem.assert_0_r_item_not_fo": {"doc_hash": "40816185f7cd032ff990e7935472554777051880b46b5adf24f1a14f733d2365"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getitems_Testdir.getmodulecol.return.self_getnode_config_path": {"doc_hash": "f34ffcc4ec090e2f05bf392ef1b7dac939f00822b526dbdaf7e7b534fb330d96"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.collect_by_name_Testdir.collect_by_name.for_colitem_in_self__mod_.if_colitem_name_name_.return.colitem": {"doc_hash": "7c44d8e8255d049420880e13242c56ac779ecd4aaee1fe06befd3a79fc8a5e0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.popen_Testdir.popen.return.popen": {"doc_hash": "96a9e5e3dc3682a4cba9664a91f78c35fde1a393a9e5b06afb0d9c6fd35cf11a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.run_Testdir.run.return.RunResult_ret_out_err_": {"doc_hash": "a237967e89eb058b90a1ca60b548e4d64cadd81354e9673fff4405a8aa4e1428"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._dump_lines_Testdir.runpython_c.return.self_run_sys_executable_": {"doc_hash": "d9d3f5418979c3dc8b629818c06c40467fa3041c49d8d888276def0dbb9be542"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_subprocess_Testdir.runpytest_subprocess.return.self_run_args_timeout_t": {"doc_hash": "e8cd059d3f5d6c40003174a6ea79ea9112ee224da31dbe2641fae8c15ab16153"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_pytest_Testdir.spawn_pytest.return.self_spawn_cmd_expect_ti": {"doc_hash": "698d2b4486bd814eec13282c646f39cca6efcedb4476edfc4ab9d3b1e743ccc6"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_Testdir.spawn.return.child": {"doc_hash": "08975aa7448018d9a593638d1424c859dde90c25a9dfd7c52a094be1e96631cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_getdecoded_LineComp.assert_contains_lines.return.LineMatcher_lines1_fnmat": {"doc_hash": "8d390c0e7d58792706ce522a42b5512097df15ac2bbb0f1e970e74f9272af99f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher_LineMatcher.re_match_lines_random.self__match_lines_random_": {"doc_hash": "fb8a86074921b7c6461787e32dca8cff9a708a94720d10450ced70d7a870acd3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_random_LineMatcher._match_lines_random.for_line_in_lines2_.for_x_in_self_lines_.else_.raise_ValueError_self__lo": {"doc_hash": "04dea047fad4085f698f01ff9e5b7aab073c8edd10a4956b854daaca6e5848d1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher.get_lines_after_LineMatcher.re_match_lines.self__match_lines_lines2_": {"doc_hash": "950c3cacbf79811abf09d13fb64eb3579e7627592de6886ec9032e5b86fe22ac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_": {"doc_hash": "74747c3488b19b93371a26af5eb501d0557cae143cc9a8cce78b4dbc1fb89483"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__Python_test_discovery_pyobj_property.return.property_get_None_None_": {"doc_hash": "b6f4fb943b7c60c186620315b2d4cb1ba05cf5590e7e3dd786bd8152740fb8a3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_addoption_pytest_addoption.None_7": {"doc_hash": "6f7af3c63de4d97fedede472c8191e61d50de3c82d52d189ff6f75165d351992"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_cmdline_main_pytest_generate_tests.for_marker_in_metafunc_de.metafunc_parametrize_mar": {"doc_hash": "de1bed76293017996f2f197cb7dc2110e677ab01dac1c0fa7b06bf276bc96704"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_configure_pytest_configure.None_1": {"doc_hash": "ba90d25c79d809c73749400906e289942c3a201b8c1e01389780299daee7ea18"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pyfunc_call_pytest_pyfunc_call.return.True": {"doc_hash": "f2314114cfaaa4e97e434b1b4571281a31b09f2726c80c032a30821cc3c48963"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_collect_file_pytest_pycollect_makemodule.return.Module_path_parent_": {"doc_hash": "884b352c742bfcbac5c655587f25d1111c1ad29ead69c85d8706818d6dc59bd2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pycollect_makeitem_pytest_pycollect_makeitem.if_safe_isclass_obj_.elif_collector_istestfunc.if_not_isfunction_obj_o.elif_getattr_obj___test.outcome_force_result_res_": {"doc_hash": "df0e4e7aaecd598c0765f11728e2ed043180b70251230098811b52b92dd37318"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_make_parametrize_id_PyobjMixin._getobj.return.getattr_self_parent_obj_": {"doc_hash": "a05af47a391ddfa147440e218dd8ab0a6629596853b26da634e44f3d79a1ce78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.getmodpath_PyobjMixin.getmodpath.return.s_replace_": {"doc_hash": "0dcf9effb634de2f100f3387ec7836c38ca99cabb472969dfde7509cecca5865"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.reportinfo_PyobjMixin.reportinfo.return.fspath_lineno_modpath": {"doc_hash": "fe35bf74bc70ffb88d7e67e86ccd0b919bc2203f56d95786d428edb7792ed200"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector_PyCollector.istestclass.return.self_classnamefilter_name": {"doc_hash": "3f519b8213182848788a36bf996ca08c4d907bb1d29b39381b2685f1a2a2de68"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._matches_prefix_or_glob_option_PyCollector._matches_prefix_or_glob_option.return.False": {"doc_hash": "1aca4db076184ffab76ca253af5d86337f7a64d8bd210e4baba8462e904c0433"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector.collect_PyCollector._makeitem.return.self_ihook_pytest_pycolle": {"doc_hash": "2cdd5e38cd35606e23b69942d83bba227a97ed23a935346921d0d0643fe3628e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._genfunctions_PyCollector._genfunctions.if_not_metafunc__calls_.else_.for_callspec_in_metafunc_.yield_Function_": {"doc_hash": "9ae507c069f0382aede581f602dfc8f4d80bd6b44bf3edbd6948e5b0c12de4b5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module_Module._inject_setup_module_fixture.self.obj.__pytest_setup_module.xunit_setup_module_fixtur": {"doc_hash": "451312e3d341f5b93d9e93cf734fb1bce84c577f2e8bab01c5d1d22c6690a0a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._inject_setup_function_fixture_Module._inject_setup_function_fixture.self.obj.__pytest_setup_function.xunit_setup_function_fixt": {"doc_hash": "f55d6d880cdc7064fcaeca8de5803ae50a72794ee78f0c55781435ed673900d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._importtestmodule_Module._importtestmodule.return.mod": {"doc_hash": "5458ea3f389de5bd5642d5f44392bb41602d7b32da41c7a286234968487554ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package_Package.setup.if_teardown_module_is_not.self_addfinalizer_func_": {"doc_hash": "0e4f184e199b1851b9c5f09fb4f93109ba9a367ed7e3f0caf785b253a54e5eac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._recurse_Package.gethookproxy.return.proxy": {"doc_hash": "2ac2e92c3890cc2e7703ad01d561c6f40b8e5acddb8442d12ebfed1fba6618be"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._collectfile_Package.isinitpath.return.path_in_self_session__ini": {"doc_hash": "262779f7d76e2fdf05c38132455e72d441508c14a18a392aff07b3d2c1fdb65a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package.collect_Package.collect.for_path_in_this_path_vis.None_2.elif_path_join___init___.pkg_prefixes_add_path_": {"doc_hash": "85064aabe09dfd6d84cd39ed693893ac69deb023743a32251578a55b9b63b77b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__get_xunit_setup_teardown__get_xunit_setup_teardown.if_result_is_not_None_.if_arg_count_.else_.return.result": {"doc_hash": "e32498f42698ba84b1e9ab89c1137a7ee207cd79fc4f8d52dbb90d353dd27883"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__call_with_optional_argument__get_non_fixture_func.if_fixtures_getfixturemar.return.meth": {"doc_hash": "9d917a2ceb864ca50b92e2a27fb6bfa0b7870306bad253e72d2479c1dbd4acda"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class_Class.collect.return._Instance_name_pare": {"doc_hash": "883082d76c3227ee5c951f3aa520e1e0e4fd4d15cc5c39831fb59617944d89aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_class_fixture_Class._inject_setup_class_fixture.self.obj.__pytest_setup_class.xunit_setup_class_fixture": {"doc_hash": "7c4c2982e5b3148a9e04f5b064868ee95c945db75a4bcd5f5db06c6a8a41cc69"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_method_fixture_Instance.newinstance.return.self_obj": {"doc_hash": "54ed36a7bc1327f4593421da3661864e624eb357cb4d8b361edb4a52eb7721b4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_FunctionMixin_FunctionMixin.repr_failure.return.self__repr_failure_py_exc": {"doc_hash": "1efad44620b37e0ee78eeed91f4fc95ffa2dde74114ba0555cf00754ddf8f5db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_hasinit_CallSpec2.id.return._join_map_str_filter_": {"doc_hash": "4aefbc8e43f89913120fa78f6b03e2cabe8f10365421ddda11da2bb08f873ad3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_CallSpec2.setmulti2_CallSpec2.setall.for_arg_in_funcargs_.self__arg2scopenum_arg_": {"doc_hash": "0b6a50c5ef8805cb029ac9d0b5ca7d2aceacac7afd8fdb7213c30f93501d6dfc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc_Metafunc.__init__.self._arg2fixturedefs.fixtureinfo_name2fixtured": {"doc_hash": "5b25842b7f62d141c70ac1f33855f88db332abb1dbace2591648663fa988bc78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc.parametrize_Metafunc.parametrize.self._calls.newcalls": {"doc_hash": "4c9daa5712e6393578a1cc6cb300c920280f7a22bf0a3b391241ab38f0575879"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_ids_Metafunc._resolve_arg_ids.return.ids": {"doc_hash": "6b93f7c1d22a6098666ddb9deabe81195b25bd7c21e76df5e68499af3ec0e186"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_value_types_Metafunc._resolve_arg_value_types.return.valtypes": {"doc_hash": "ae54762753fefe4a670613655df34ba665ff1fe07933e79efb70496c8fdff1a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._validate_if_using_arg_names_Metafunc._validate_if_using_arg_names.for_arg_in_argnames_.if_arg_not_in_self_fixtur.if_arg_in_default_arg_nam.else_.fail_": {"doc_hash": "1d663779b030ec512f18b067ecc5ff611e9980fe913ffd216d2732b3c929e7d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__find_parametrized_scope__ascii_escaped_by_config.return.val_if_escape_option_else": {"doc_hash": "e2a32edfffa151711e8a9e4a2ba8956a7eea91f8b8f223a4cc98362a781512fc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idval__idval.return.str_argname_str_idx_": {"doc_hash": "4d8f27ab7518748b9f4b9fa822da80e65183ed28c807868f78d7a016a931a484"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idvalset__idvalset.if_ids_is_None_or_idx_.else_.return.ascii_escaped_ids_idx_": {"doc_hash": "a1f6f1bb95cbe68948c28d31c3c2d009dc5ddd505fa401e5904ea24f147ce7b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_idmaker_show_fixtures_per_test.return.wrap_session_config__sho": {"doc_hash": "9c1bbb0b9c9256bf5e3bbc2d47b940fc844a3d6ab13ec1dd7d023510194ca37b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test__show_fixtures_per_test.write_fixture.if_fixture_doc_.else_.tw_line_no_docstring": {"doc_hash": "a34dd45f097ca9f04486228d2b330c91ae585bd9b0391217b61e1b212078c4b4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test.write_item_showfixtures.return.wrap_session_config__sho": {"doc_hash": "e1b1c9a746c065efd43d6d41e89a4d09d90dd5e73d5315b3bb742271f322c443"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__showfixtures_main_write_docstring.if_rest_.for_line_in_dedent_rest_.tw_write_indent_line_": {"doc_hash": 
"fa8b01cd89e405cf19d4e2f46ef4f04fe87edba332d18331dae81c26e5dca656"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Function_": {"doc_hash": "530b398eaee5586885f5b74332865b55fffaa541b247a19b84d5ffce9af86003"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_from___future___import_ab__non_numeric_type_error.return.TypeError_": {"doc_hash": "b74b9686ebe519c3c760fda5046f92966b12af512c558733de59a3cbfdc6741e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_approx_h_ApproxBase._check_type.pass": {"doc_hash": "08868acb8b3e890c26027d4c36067798cd46b62ae3de1e2de64e5ef73b74e37e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__recursive_list_map_ApproxNumpy.__eq__.return.ApproxBase___eq___self_a": {"doc_hash": "09d63de3b1b38e4390145d73e95a5398e0d196713226d8b721fd6b9ab0c16068"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxNumpy._yield_comparisons_ApproxNumpy._yield_comparisons.if_np_isscalar_actual_.else_.for_i_in_np_ndindex_self_.yield_actual_i_item_s": {"doc_hash": "98f10bed1b8b380c8a9b8e9b1ea57a92cc5e6da945b91c8539826d5a92c0adf7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxMapping_ApproxMapping._check_type.for_key_value_in_self_ex.if_isinstance_value_type.elif_not_isinstance_value.raise__non_numeric_type_e": {"doc_hash": "d1199d1fe1e9dd78a0b03f00a375d655ae783be34cbb160ed768fe97eb36825c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxSequencelike_ApproxSequencelike._check_type.for_index_x_in_enumerate.if_isinstance_x_type_sel.elif_not_isinstance_x_Nu.raise__non_numeric_type_e": {"doc_hash": "75777ca3230b1b1d8825bfd883506810cea7d738bcb00b0e62cae4b6be61c5e7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar_ApproxScalar.__repr__.if_sys_version_info_0_.else_.return.u_u00b1_format_se": {"doc_hash": "476ba3526fc8d56fc649dfea7e7e0e3d8d659e492ff42582b6faa79d4116ac5d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.__eq___ApproxScalar.__hash__.None": {"doc_hash": "e4fbc1effaf5b1960fb7b03f423e2837730e68462a36a89f8909103d0fed348d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.tolerance_approx": {"doc_hash": "f85c026011bd51d89641504097a73e6cdebc6450aa1d9fc3302964182a102d58"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx.__approx._": {"doc_hash": "b18eae9fb66cdbbe6faa7cc748900b818d4b23d47816353ba2c46e62a6b676fc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx._Delegate_the_comparison__is_numpy_array.return.False": {"doc_hash": "55f2b9be3099e65de3b1049538d77b5a2f8adc3e692f20a136d15dacbe73ab65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_raises_h_raises.r_": {"doc_hash": "321a089d3c566cc2924c2cba7ad7bdd9188dbb120f3b820d9dd27c5a98cea265"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.__tracebackhide___raises.fail_message_": {"doc_hash": "0d0dce309e668d07cd4d39d44496065a77602581232f1192459d7c33e269b29d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.Exception_": {"doc_hash": "9d36c3be6f1e3dc88c85134d4366659520058a0b3690a205ed0ade7e79faa981"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py__recording_warnings_du_recwarn.with_wrec_.yield_wrec": {"doc_hash": "170132d8d70e664826d80fc0027e97cf8400673fdb7383a6e35a339c97d09b87"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_deprecated_call_deprecated_call.return.warns_DeprecationWarning": {"doc_hash": "6429482640cf5d924f63293bc8ccc21038cec2dcf60970ee22d8c65031f04d90"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_warns_warns.if_not_args_.else_.with_WarningsChecker_expe.return.func_args_1_kwargs_": {"doc_hash": "7e29c8b1f729304eea41d380dd5e8827fd28e8c1fc63f7fd150ac2532756485e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder_WarningsRecorder.clear.self__list_": {"doc_hash": "010227fe4c14aafcfee44d85a7bf4ed2bf015f46640639d5267551a11495ee4d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__enter___WarningsRecorder.__enter__.return.self": {"doc_hash": "3b3426609afd6faab56ceb380a1b25d04a5c626fef49cc8d285419aa5d60a3ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__exit___WarningsRecorder.__exit__.self._entered.False": {"doc_hash": "96578403d649fc2df8279d278fdf1f0bedda20a7f40f987cb239378473580bf1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker_WarningsChecker.__init__.self.match_expr.match_expr": {"doc_hash": "c15a611bc2b6e868a83e885c37a0526bd43f5228805ba7371ea65ea41a97d9ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker.__exit___": {"doc_hash": "26e3369415f7f489f3a2ad75e1b4bfa10fe3218d23a63d24b4be448b8772fe11"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_from_pprint_import_pprint_getslaveinfoline.try_.except_AttributeError_.return.s": {"doc_hash": "ef8d3daef1dd82b60fbc111b92bbc109cf33ad5b72dd9b1c3967e2c0a0d599c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport_BaseReport.count_towards_summary.return.True": {"doc_hash": "9a68505ea0dab6c1950f984d6390d44e6c032f4d0524362d95f3a577ac8e16a4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport.head_line_BaseReport._get_verbose_word.return.verbose": {"doc_hash": "3964fc91f7c1519ca1002c8198932169d8f2d0a359ade45126eab2bc46d76a5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json_BaseReport._to_json.disassembled_report.return._": {"doc_hash": "853c14bb548ef5c942034340e9ea804b7a6d7bb87e8b59b4f7b096cf8e771c99"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json.d_BaseReport._to_json.return.d": {"doc_hash": "15eba18ac855a04c430482669c69ebd0892ce94c35a688dfaf3c1de84456a94e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._from_json_BaseReport._from_json.return.cls_reportdict_": {"doc_hash": "cfc70ceb0442924e8e966aa867e53255f61a1dbc61e7c0a8afc912b977bebd04"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py__report_unserialization_failure__report_unserialization_failure.raise_RuntimeError_stream": {"doc_hash": "16b0ad9dc4f7a2bc823800b94139f2b813816df4dad6a23527e1185fc4a5111c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport_TestReport.__repr__.return._s_r_when_r_outcome_": {"doc_hash": "e92590d002192dee27991f3896b346e3d6352bd825302bc2cf84e9d5ea58cd1c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport.from_item_and_call_TestReport.from_item_and_call.return.cls_": {"doc_hash": "cbaf47a597adc5f2197d8dadf48b6dba0f626cefa258291e2243d4da72b42c1d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectReport_CollectReport.__repr__.return._CollectReport_r_lenres": 
{"doc_hash": "77a0cff7a427a67dfbb4a527febbeff3dea46a861a34786dff39f3601b415f10"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectErrorRepr_": {"doc_hash": "c928695fe384ab1c308881d6319935a89270d431464a3197a0f7ad63dadd2c4a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py__log_machine_parseable_pytest_addoption.group_addoption_": {"doc_hash": "b9836f51a8d4266bcc99b65bf3fb2237dd3fe373c92e28271ba51b4e9510b7e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_pytest_configure_pytest_unconfigure.if_resultlog_.config_pluginmanager_unre": {"doc_hash": "454049864805638ac9f6f6bca4b83c5edc82cfe2c0fcdab522b61a127d3dd9f1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog_ResultLog.log_outcome.self_write_log_entry_test": {"doc_hash": "91a919c9467968904f796ab1eb089ed4f0db811dfac50245877db676ae3c0b65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_runtest_logreport_ResultLog.pytest_runtest_logreport.self_log_outcome_report_": {"doc_hash": "cf7d7ae301b89e3688493f53eb09abf7895df53337f5972890dc961b1d9a600e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_collectreport_": {"doc_hash": "1a2f887c6f3b74be1961a623768ea33e3664de3a275470a66d179077d996b3d0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__basic_collect_and_run_pytest_addoption.group_addoption_": {"doc_hash": "2ac342c8479f874cebbe587df9a9701db94a2ad0fcd30a7ec00e8799d8ba6fba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_terminal_summary_pytest_terminal_summary.for_rep_in_dlist_.tr_write_line_02_2fs_": {"doc_hash": "973a605a00aa7283a727efdc9bf07c02b4a7c9bda1c951bd7e8f4139721f2e65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_sessionstart_runtestprotocol.return.reports": {"doc_hash": "faa8afffa47457179176042e7431aa87cc382090b63493002e94ab7b9852f906"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_show_test_item_pytest_runtest_setup.item_session__setupstate_": {"doc_hash": "e7572ce9790a8813751bd8186ee422f3817e13aeaa782ba43da475e3132efc51"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_call_pytest_runtest_teardown.None_2": {"doc_hash": "3e725da8cef50c89064eff4cd8d72d4bef122398a4dafc2a95df00f525c87ad9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__update_current_test_var__update_current_test_var.if_when_.else_.os_environ_pop_var_name_": {"doc_hash": "097f0eb9b8cffb1095f37ea307560e44955db8fc8dcd32db2e7928a314e203ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_report_teststatus_call_runtest_hook.return.CallInfo_from_call_": {"doc_hash": "36e6b67b5ee9bfd19f68b47d6c70481fb9a763e2b1d71de2ee77871c0263efbf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_CallInfo_CallInfo.__repr__.return._CallInfo_when_when_r_": {"doc_hash": "5ba95ac7050601f27ec0d7af9d89621031bda1c250163cdd90d3db143851e738"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_makereport_pytest_make_collect_report.return.rep": {"doc_hash": "3a73c2254048dd2aaf433cdd8929d02968688ec2434a7151fac48b8076073aad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState_SetupState._teardown_towards.if_exc_.six_reraise_exc_": {"doc_hash": "4ad405bfcee3c87478f67d2aef1d110f67f3111cc7adeef5b805132a76a6fc1b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState.prepare_": {"doc_hash": 
"126b006531932f98b9e9030f3db104104e00d0b2e80ddbaee35eebbf5da9a145"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_from___future___import_ab_pytest_addoption.None_1": {"doc_hash": "3c4297ad128b47ec77bbeed301e361db88f7aecc463e4669012498f0ef6686ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_pytest_fixture_setup_pytest_fixture_post_finalizer.if_hasattr_fixturedef_c.if_config_option_setupsho.if_hasattr_fixturedef_c.del_fixturedef_cached_par": {"doc_hash": "211bd91879ada5d91debbac78dfe70b9e814fa91ee93313131735c324ff10c3c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py__show_fixture_action_": {"doc_hash": "245bab35bca44c9b38aac845534f425391794cba5a06fa4b36691a4764552966"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setupplan.py_from___future___import_ab_": {"doc_hash": "d760e151d93b1abdd29463d8723c5f759db27da60ab946a4111ac1c5aaab2237"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_configure_pytest_configure.None_2": {"doc_hash": "652c93fd371dea533256f62b198d9a03d4fa4d2b508e7e5c7da572d1b201e289"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_setup_pytest_runtest_setup.check_xfail_no_run_item_": {"doc_hash": "3ab43850c0cadc20ff168ce583eb29ac1d0398972fd9d90f7106b59a8db5eb87"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_pyfunc_call_check_xfail_no_run.if_not_item_config_option.if_evalxfail_istrue_.if_not_evalxfail_get_run.xfail_NOTRUN_evalx": {"doc_hash": "c4948d74f167bdefbeae0277709e78b13e08f788a3d876d7c551cb209cf1f97a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_check_strict_xfail_check_strict_xfail.if_evalxfail_istrue_.if_is_strict_xfail_.fail_XPASS_strict_": {"doc_hash": "157865ae6d487f12b02999c7ba1e26a94f238e7eab4156cdcd458bf8132aeec0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_makereport_": {"doc_hash": "413350d834d5de3a549c183175bf89303fbbe6f7df66af2ec25cea50274e0461"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_pytest_pytest_configure.config_pluginmanager_regi": {"doc_hash": "12938804e6c975926e333d9598ec821a00d255b02e6e578d761c401f408b4a6f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin_StepwisePlugin.pytest_collectreport.if_self_active_and_report.self.session.shouldstop._": {"doc_hash": "644e6fc72a33e235b748ccf90a02ae62c2dd3a989dc20955152481230c8bdff6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin.pytest_runtest_logreport_": {"doc_hash": "0e720c60a5f24b589ce61cb3bd5448c1acc856894611f42cb373b176cec3fc2e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_MoreQuietAction_MoreQuietAction.__call__.namespace.quiet.getattr_namespace_quiet": {"doc_hash": "31214c600cee6db2bd6c8f8d432566a84647c532414fe6ef9a1fae98adcf9beb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_addoption_pytest_addoption.parser_addini_": {"doc_hash": "378bd109e9c1f8af8e47919d1b33e8c7e9d58b5d0c9a54c42100ff319659f944"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_configure_getreportopt.return.reportopts": {"doc_hash": "eda62ccbac7977a7f671e5fd6c446761a30cccd818bc4815a16b700aca1c0095"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_report_teststatus_WarningReport.count_towards_summary.True": {"doc_hash": "78c0fb18f5b94f2a00f7d620bd20789703c5ef41353e8c49e63f144ad0b598cf"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_WarningReport.get_location_WarningReport.get_location.return.None": {"doc_hash": "81ea5f459f1397dc7c6c57bc47c878444d5431e833bc48ad1e682d6b52942795"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter_TerminalReporter.__init__.self._collect_report_last_write.None": {"doc_hash": "73cf4d9f3b51fc7ecb5e74809edd97ebb19dcb341bc132e5c2fa5d5f54d71c9f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._determine_show_progress_info_TerminalReporter._determine_show_progress_info.return.False": {"doc_hash": "f05084745cce12e69d67f87dddeffa011d9cd29d7aa13aa786b48a286e72718a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.verbosity_TerminalReporter.hasopt.return.char_in_self_reportchars": {"doc_hash": "20a14f7727195b96a1cfea1ad05a24267a1c96b7eb00f92345a5cb66aa664de1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_fspath_result_TerminalReporter.write_fspath_result.self__tw_write_res_mar": {"doc_hash": "ec08521f0f5363d86cdeeeee392627a4fe1afde80b147b99dd257b5830438d12"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_ensure_prefix_TerminalReporter.write_line.self__tw_line_line_mar": {"doc_hash": "58e7d3b91a90bf67f8fcdad9ffef04aeb5201589e34387f3bfe865ba18a4e0ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.rewrite_TerminalReporter.rewrite.self__tw_write_r_lin": {"doc_hash": "cc37401fea62e77876673b88b812c77bd3c53cf6d91b5d9c19a4dbeff8cf3463"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_sep_TerminalReporter.pytest_internalerror.return.1": {"doc_hash": "97c3a618385ab6686d15cc65cc8e38b932224c2b56eb09a1d44d3cd78874241c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_warning_captured_TerminalReporter.pytest_warning_captured.warnings_append_warning_r": {"doc_hash": "49558601417a7728f37c48a00569487661882835ba3d27d41608604240a92919"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_plugin_registered_TerminalReporter.pytest_runtest_logstart.if_self_showlongtestinfo_.elif_self_showfspath_.self_write_fspath_result_": {"doc_hash": "1a8fa41846a68d919ef298f6e25c84f8360ff2dcd44cd18daba152c4eee4d605"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logreport_TerminalReporter.pytest_runtest_logreport.if_self_verbosity_0_.else_.if_not_running_xdist_.else_.self.currentfspath._2": {"doc_hash": "e0ce8b05bc86fb3e35e06f5ee67c127de5d63c4a2051656a9d5dfa25a5cd6f7a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logfinish_TerminalReporter.pytest_runtest_logfinish.if_self_verbosity_0_an.if_is_last_item_.else_.if_past_edge_.self__tw_write_msg_n_": {"doc_hash": "fb3cb8d84de616f904a3c3038021d95900b161211f47032ee3c1e27be2bc1c5d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._get_progress_information_message_TerminalReporter._get_progress_information_message.if_self__show_progress_in.else_.return._100_": {"doc_hash": "4f7312b32d321636b5b1a61163bbae6c85b3fa25058df902618cebbf82faeaf5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_progress_information_filling_space_TerminalReporter.pytest_collectreport.if_self_isatty_.self_report_collect_": 
{"doc_hash": "7753b293276bf3123994579eac02e5f81c969839dacce63defdb7f8de5269ce1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.report_collect_TerminalReporter.report_collect.if_self_isatty_.else_.self_write_line_line_": {"doc_hash": "e1e591a31ed9cb309c83c80f203090d2f3ea048256c4c7f6ccb7e597f869a69e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionstart_TerminalReporter.pytest_sessionstart.self__write_report_lines_": {"doc_hash": "8c3da0fa574e1b663a36bcd20b5638e435492802fb8e029168a4ef8d8dbbbcbe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_report_lines_from_hooks_TerminalReporter.pytest_report_header.return.result": {"doc_hash": "746b664534fc93d73037d296c6ceac6c3c7048836a16909022bb3465d36621f8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_collection_finish_TerminalReporter.pytest_collection_finish.None_1.if_self_stats_get_failed.for_rep_in_self_stats_get.rep_toterminal_self__tw_": {"doc_hash": "c682715bf4763652f045a659359adca1a26220c81fe91aeecbd1818117e488de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._printcollecteditems_TerminalReporter._printcollecteditems.for_item_in_items_.for_col_in_needed_collect.if_self_config_option_ver.if_hasattr_col__obj_a.for_line_in_col__obj___do.self__tw_line_s_s_i": {"doc_hash": "87e43c3db023a105c18cc8b87027fb6dd55d5e07e42b4d2ca9a63aa0ad52f6c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionfinish_TerminalReporter.pytest_sessionfinish.self_summary_stats_": {"doc_hash": "20c43591255215b9f1fff8830a956fd6fd1dd2f4f699ab03073ecfdfdad675c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_terminal_summary_TerminalReporter.pytest_unconfigure.if_hasattr_self__keyboa.self__report_keyboardinte": {"doc_hash": "04c0478e2b69637280946298670a2575bd2718d694a0a76fea85543d3768c889"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._report_keyboardinterrupt_TerminalReporter._report_keyboardinterrupt.if_KeyboardInterrupt_in.if_self_config_option_ful.else_.self__tw_line_": {"doc_hash": "a588b0ce4bd6225ede628918888af31298ffd6d77de47b53eb965ffef151aa2f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._locationline_TerminalReporter._locationline.return.res_": {"doc_hash": "18b0524c8bb4aef0ff764aef8c7ef57a1b5836b0d30c2c626952d18a6d2e9e89"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._getfailureheadline_TerminalReporter.getreports.return.values": {"doc_hash": "40bdd9bc0711fa6f9acdc1484d921d80b884f4b5ffdb79433c5a6ae25f03c988"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_warnings_TerminalReporter.summary_warnings.if_self_hasopt_w_.self__tw_line_Docs_h": {"doc_hash": "872f90e0bd3614f053f15a8a09730461d410f4ab3f29c0825bbb0478116ad892"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_passes_TerminalReporter.print_teardown_sections.for_secname_content_in_r.if_teardown_in_secname_.self__tw_line_content_": {"doc_hash": "2c40e5a760601828e6a92e2156e0aca95153b6d0841fc607f46df282d3ba3b99"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_failures_TerminalReporter.summary_failures.if_self_config_option_tbs.if_self_config_option_tbs.else_.for_rep_in_reports_.for_report_in_teardown_se.self_print_teardown_secti": {"doc_hash": "0991adcff16b7fd234f9ccf2e50d193dc35e31b36af3abe4e240be1d50f06086"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_errors_TerminalReporter.summary_errors.if_self_config_option_tbs.for_rep_in_self_stats_er.self__outrep_summary_rep_": {"doc_hash": "cb9350a942589ba5a540e5a296889606480eb168000493f13988a4e44a74eff4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._outrep_summary_TerminalReporter.summary_stats.if_self_verbosity_1_.self_write_line_msg_ma": {"doc_hash": "0b7c1f31a5616ff4084939e722b630533d03cbb32138441bd5cc5540ac34af6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary_TerminalReporter.short_test_summary.show_xpassed.for_rep_in_xpassed_.lines_append_s_s_s_": {"doc_hash": "4f4911025cdb71af655d888ddc1db291a673f01bbded9bea24b52eac242c8066"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary.show_skipped_TerminalReporter.short_test_summary.show_skipped.for_num_fspath_lineno_.if_lineno_is_not_None_.else_.lines_append_s_d_s_": {"doc_hash": "8ad4122a8956f730b9b7e7d43a02598101a1dff7e624f87f179e06ed6a8406fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__folded_skips__folded_skips.return.values": {"doc_hash": "8c9ced21670ceb1304e55be350dce4a26dea4b1997cac4d1cbe0dbc1bfd8eede"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_build_summary_stats_line_": {"doc_hash": "8cd483c5f8e3630fc036250845a64d9aaf2f19e6ac15f247a3afcadfe0a5c47e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py__support_for_providing_TempPathFactory.mktemp.return.p": {"doc_hash": "8d8fb527fcc9824f56b47649fe18dd06d62827e7bb764a3c7a9df29f960c72f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempPathFactory.getbasetemp_TempPathFactory.getbasetemp.if_self__basetemp_is_None.else_.return.self__basetemp": {"doc_hash": "64a23881091594cdcad6a5e8cf1d4984ecda0fa79a38ea9da0d3256c3db0cd0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory_TempdirFactory.ensuretemp.return.self_getbasetemp_ensure": {"doc_hash": "c8dc12864890ebf32c120582c46b683e250ea4963fcf112e3fa355802cb2da79"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory.mktemp_get_user.try_.except_ImportError_KeyE.return.None": {"doc_hash": "94e3dafbd86070fecf27d84be7a60374a5ee0dc79a94536a74b5d6aa61ce2ae8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_pytest_configure_pytest_configure.mp_setattr_pytest_ensur": {"doc_hash": "4e03073773e79abd628240a519612da7a5c5a38b76fa48c2330588e0ad33cad6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_tmpdir_factory_": {"doc_hash": "6d5d0780b4fefe9f2b7f89b728895fa3379d7a0a0bdbb9945a035720472c2fcd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__discovery_and_running_pytest_pycollect_makeitem.return.UnitTestCase_name_parent": {"doc_hash": "f0829628d64655783dc2fbbf2f3c209c81b5328e42c74cf614224a97c80f9734"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase_UnitTestCase.collect.if_not_foundsomething_.if_runtest_is_not_None_.if_ut_is_None_or_runtest_.yield_TestCaseFunction_r": 
{"doc_hash": "6e33222ca05a8e1584043bef8c78cea8f3be8afbc9ad8b58adeabf772d7705d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase._inject_setup_teardown_fixtures_UnitTestCase._inject_setup_teardown_fixtures.if_method_fixture_.cls.__pytest_method_setup.method_fixture": {"doc_hash": "e25b6cb4d070d680df7f721134f4d6596b6521ad82ad285462dc6f7584b635b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__make_xunit_fixture__make_xunit_fixture.return.fixture": {"doc_hash": "62e45c08e3a735c45b58bfa3b705a4fc0b8cdf70a1d71b0a2f16ca3e9532c1af"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction_TestCaseFunction.startTest.pass": {"doc_hash": "f461a449c703880cac03d5103ba5a8c765b59b8f8b99c867b762956e7f511c10"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._addexcinfo_TestCaseFunction._addexcinfo.self___dict___setdefault_": {"doc_hash": "14f20b63e344665abeb046fed4513769d7359974a8ce9910037728929820c674"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.addError_TestCaseFunction.stopTest.pass": {"doc_hash": "1361f64de3d4976af6dde55d8fdac09c51bec17b1d4393c6f618f8efb7cb2da2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._handle_skip_TestCaseFunction._handle_skip.return.False": {"doc_hash": "38bf26f2c7efa537a0ae006b7eab997814f2ab609d233781966db6d7863182a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.runtest_pytest_runtest_makereport.if_isinstance_item_TestC.if_item__excinfo_.try_.except_AttributeError_.pass": {"doc_hash": "44474b8626e57a7f3f1ce47eecc1258e35f205b5e9218fe25a878b0352091f68"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__twisted_trial_support_": {"doc_hash": "818a334784ccd3d6d49c1dfdfb12ace59fac1ccc2b748cf6ef07e3df6c2b0564"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_attr_PytestDeprecationWarning._": {"doc_hash": "1e37c56a9bfd730302c65472a52b5e648a6129a63c3aca77a2d2215eb250f686"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestExperimentalApiWarning_PytestExperimentalApiWarning.simple.return.cls_": {"doc_hash": "9b22427e832ecf56041d840ea7f3e5b1ed8c5e63cea8701d04aee5a5ff56347a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestUnhandledCoroutineWarning_": {"doc_hash": "3279276b59914e4b3c07cedaf1c05b81fb3ba28de249e14ce31c45894799dcc0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_from___future___import_ab__setoption.wmod_filterwarnings_actio": {"doc_hash": "958b1e5becd67f7f84a83e34217a6a429453e8a03f49eaff06a40b27b310b577"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_addoption_pytest_configure.config_addinivalue_line_": {"doc_hash": "e5efca21079108e37162f18c92743a32acf255b543300dbf6911ae3db2764abf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_catch_warnings_for_item_catch_warnings_for_item.with_warnings_catch_warni.for_warning_message_in_lo.ihook_pytest_warning_capt": {"doc_hash": "d6c9f6d71907414e5c08282914a2fa0a8ab7875865c78c4a310da713ff59b278"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_warning_record_to_str_warning_record_to_str.return.msg": {"doc_hash": "60a92500a8f1acc74b8859d1eef041d705f8d25488393552eab5425c21430222"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_runtest_protocol_pytest_terminal_summary.with_catch_warnings_for_i.yield": 
{"doc_hash": "735a5337632e0671919aa027237a05fd4c100fe21ac009a5daa7e0a1da72a022"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py__issue_warning_captured_": {"doc_hash": "347375d1dfcf78572e696f5f47a1c816881fe6668f586b21fdb4866632f6c782"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/pytest.py__PYTHON_ARGCOMPLETE_OK_": {"doc_hash": "76ca478b15e9d5f740a2a58dd496d855e1b92e5cafe85f41057ae1df39f01d0a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py__coding_utf_8__prepend_pythonpath.return.os_pathsep_join_str_p_fo": {"doc_hash": "1ecbbd01b65570b23eb0add3cfb9804c293429a51b55672aecaadd06d886cebd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage_TestGeneralUsage.test_root_conftest_syntax_error.assert_result_ret_0": {"doc_hash": "3e1886145f41ac35fefc7cf3cbc6a3cfdde5526d0e470d6feea2ecc5a7c5b648"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_error_issue38_1_TestGeneralUsage.test_early_hook_error_issue38_1.None_2": {"doc_hash": "fe02271758caf41f744b1543a33992e7a15cdf8332738d0f091f5303a699c6a7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_configure_error_issue38_TestGeneralUsage.test_file_not_found_unconfigure_issue143.result_stdout_fnmatch_lin": {"doc_hash": "bab898865056e1004a440aba8bce476f5c4f5f611a222cbe564ee28f80ee23fd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_config_preparse_plugin_option_TestGeneralUsage.test_config_preparse_plugin_option.result_stdout_fnmatch_lin": {"doc_hash": "8f92b4f0279d6b9ee038829b69864553b6dcdf8cc2874f18c5d3bd589b01e09f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_load_setuptools_name_TestGeneralUsage.test_early_load_setuptools_name.if_load_cov_early_.else_.assert_loaded_myplug": {"doc_hash": "20cec49eb6133a93d300ab671d0344716c757f5b3e5f3ba745a7028131c3f12c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_assertion_magic_TestGeneralUsage.test_not_collectable_arguments.result_stderr_fnmatch_lin": {"doc_hash": "b79594a5dfda577f7a1ce239125211acac68c54a4bb58d36b27d2720f3909d42"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_better_reporting_on_conftest_load_failure_TestGeneralUsage.test_better_reporting_on_conftest_load_failure.result_stderr_fnmatch_lin": {"doc_hash": "50dfa533f5a72d8f4a863d4352993fb652e545e37f5e3769020ce68b68a7d306"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_skip_TestGeneralUsage.test_conftest_printing_shows_if_error.assert_should_be_seen_i": {"doc_hash": "10ceaea8188e9ca039c0eeb655f82af95c8144d030690c33883c9c48c592a811"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_chdir_TestGeneralUsage.test_chdir.assert_not_result_ret": {"doc_hash": "f43fc909922350f371a00d6349dc392968905e0a070e0b8049f523eefe53ed27"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue109_sibling_conftests_not_loaded_TestGeneralUsage.test_directory_skipped.result_stdout_fnmatch_lin": {"doc_hash": "50419958f8cfc34c1f260b7ea0de4a01d311d1413ac4ff7fc968799d8f91da90"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_multiple_items_per_collector_byid_TestGeneralUsage.test_multiple_items_per_collector_byid.result_stdout_fnmatch_lin": 
{"doc_hash": "fd2fa8cdd52e243d06d95d0ed3c49b426374d7c5f60c6157e2ffc1951b6db241"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_skip_on_generated_funcarg_id_TestGeneralUsage.test_skip_on_generated_funcarg_id.res_stdout_fnmatch_lines_": {"doc_hash": "36ba58da67bdebebfbfba76d86a849670c36bb595106dc7c622f9aa7fd145241"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_selects_TestGeneralUsage.test_direct_addressing_selects.res_stdout_fnmatch_lines_": {"doc_hash": "ec8af7417ae3b52a877762bb94cff1de4f2d2bc87b8234b2d0f117d084041996"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_notfound_TestGeneralUsage.test_initialization_error_issue49.assert_sessionstarttime_": {"doc_hash": "1bde10c55015418f3b0f3e3f10381505eded5595f3bd87a0a864fdb3aa52a072"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue134_report_error_when_collecting_member_TestGeneralUsage.test_issue134_report_error_when_collecting_member.if_in_lookfor_._usage_error_only_if_ite": {"doc_hash": "9c9418edbe5aa29c556fc273ad544d37c64255a3ac15d3bfd87dd27da1c416f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_report_all_failed_collections_initargs_TestGeneralUsage.test_report_all_failed_collections_initargs.assert_result_ret_EXIT": {"doc_hash": "84e48748edb74f161a2d587f9893ce12f741414d0c315e2301fb660519184c49"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook.assert_res_ret_0": {"doc_hash": "55f193f32bc99b5da8e2c9ee5cd3d35730ef4e285917040df11488fabc65ed98"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_unknown_option_TestGeneralUsage.test_getsourcelines_error_issue553.res_stdout_fnmatch_lines_": {"doc_hash": "ada3f66ff970c434e3c69c67225c7c2ce09a8cab2412c97b05d063bb959e0d98"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_plugins_given_as_strings_TestGeneralUsage.test_plugins_given_as_strings.assert_pytest_main_args_": {"doc_hash": "cd05f10dbd6a2b666bb7ce9c507c22df471789c13f37d60fdf8b96b2588ef358"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_parametrized_with_bytes_regex_TestGeneralUsage.test_parametrized_with_null_bytes.res_assert_outcomes_passe": {"doc_hash": "f75e2c82b5c43b16cb88f9474a24e398d43063044c43b78133051fe2affc52c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants_TestInvocationVariants.test_invoke_plugin_api.assert_myopt_in_out": {"doc_hash": "817aed3a5ea0bf1f50a2c46766a635a4a688c80ed5d44e30bfe8c58cd9838dcd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_pyargs_importerror_TestInvocationVariants.test_cmdline_python_package.result_stderr_fnmatch_lin": {"doc_hash": "38738816884b63e5851e60137d3143ca90b930a74ab9313010b2ef49e4a646b5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_namespace_package_TestInvocationVariants.test_invoke_test_and_doctestmodules.result_stdout_fnmatch_lin": {"doc_hash": "79ed46124248e9ade51f0324181e0213e8dcf7134b959e0e98a62644803d0974"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_symlink_TestInvocationVariants.test_cmdline_python_package_symlink.if_hasattr_py_path_local_.else_.result_stdout_fnmatch_lin": {"doc_hash": "61b1e0ad731fdaa28100e1883af1df48039bb697fd1d6f43155a7e7d9b3f493c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_not_exists_TestInvocationVariants.test_noclass_discovery_if_not_testcase.reprec_assertoutcome_pass": {"doc_hash": "738abcfa172367811a6535bbab433cbedca1e3c3b801f62c17033bde7b4bb127"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_doctest_id_TestInvocationVariants.test_has_plugin.assert_request_config_plu": {"doc_hash": "e83b126d58c5f868fb4a1dba5248b6e7cf0ffc69d202edfe13d963c3abaf0e99"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations_TestDurations.test_calls_showall.for_x_in_23_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not": {"doc_hash": "4e0e67e6cc18d32a8c6288e4db267b378539fb965705ec33f882931c84d13bf8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_calls_showall_verbose_TestDurations.test_calls_showall_verbose.for_x_in_123_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not": {"doc_hash": "e563d59a753c03256b91cc7ebaba307085d7aff2c7e8f8cbc075459aba57ab55"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_with_deselected_TestDurations.test_with_not.assert_result_ret_0": {"doc_hash": "13b6326aea2ca782117f34b3ec5b90ec87dbbea01bb1d1327713c78e53a6fdfb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurationWithFixture_TestDurationWithFixture.test_setup_function.result_stdout_fnmatch_lin": {"doc_hash": "7517fcad013a970f443379e29f99a8e6952f919e336a702f22ad28f4d2d2d1f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_zipimport_hook_test_zipimport_hook.assert_INTERNALERROR_n": {"doc_hash": "a5b1323be3bf17ecf4afbf25a46a6f2a3e79119bd6467598857bbf75e50bb74f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_import_plugin_unicode_name_test_pytest_plugins_as_module.result_stdout_fnmatch_lin": {"doc_hash": "f918035ab854ec59a4d9efa56f08b9e54319a42239dfa7958b48ecde6e9c2521"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_deferred_hook_checking_test_deferred_hook_checking.result_stdout_fnmatch_lin": {"doc_hash": "e1188f01c71325729f4659470348bdec9ad420ec6299cb5804b46dda4c2958ff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_values_leak_test_fixture_values_leak.result_stdout_fnmatch_lin": {"doc_hash": "459c2953c728e7290150b892022260ce2a754f726881ca5c7576f4b3f3a0a17a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_order_respects_scope_test_fixture_order_respects_scope.assert_result_ret_0": {"doc_hash": "3f038efba8ec1fc61b7d3a0e814e7a0bbd5e020f10cfaa5cf6c2cec835501569"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_frame_leak_on_failing_test_test_frame_leak_on_failing_test.result_stdout_fnmatch_lin": {"doc_hash": "c040c95f9927cd86db8fe25be4e45673bb0f203cf4d6c28345e9a503d132a64c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_mock_integration_": {"doc_hash": "15d77ca8755e23a441f69d27040372687c0e7bb58cf4855cb65d88e1288d8b05"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py__coding_utf_8_test_unicode_handling_syntax_error.if_sys_version_info_0_.text_type_excinfo_": {"doc_hash": "437d9d83bdb0febbef49b66576c1d29e508efbb2e87dbcd9d79e4d21a771dcb0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_code_getargs_test_code_getargs.assert_c4_getargs_var_Tru": {"doc_hash": "bab7247e7f2543c4bfbbf3896f4f25d848167f8866a0e63af83fe45be96cb8fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_frame_getargs_test_frame_getargs.assert_fr4_getargs_var_Tr": {"doc_hash": "78f299eff887a8eb8a8d9e32c10dbbdee62e8207bdb8ad3bee44655030c86f68"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestExceptionInfo_TestTracebackEntry.test_getsource.assert_assert_False_in_": {"doc_hash": "6a70566c5ced4afa581857c21add1154120ccd44f35b0d718087ccaa491eedcc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestReprFuncArgs_": {"doc_hash": "5d8e4fd2ff12d6917f09e0ad84df51ad9188ff1c18cc24d40cc2ecb090ac00a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__coding_utf_8__limited_recursion_depth.sys_setrecursionlimit_bef": {"doc_hash": "fd9e68f2f10b6f29cb262c33c77206a4df6acd3f2b1cb5e909d2aac3b52b72c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TWMock_test_excinfo_simple.assert_info_type_Value": {"doc_hash": "15571126620ab2fe56163f7c2180b222f20fcc265860544d1a0c3f08216d8392"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_getstatement_test_excinfo_getstatement._xxx": {"doc_hash": "4c2504f692c6e3855065f542b4e9a8c11fd5da42d8872a27ae37a9e424907e99"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__testchain_for_getentrie_TestTraceback_f_g_h.test_traceback_entry_getsource.assert_s_endswith_raise_": {"doc_hash": "284fc5ff21935d824c143fffff29eb712f49d989f643f7f7231d6fbfeb1d353d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct.try_.except_NameError_.assert_s_strip_endswith": {"doc_hash": "15c6da9f75093a9f97e40b56feb32d1462e579f5758d48fca0ce0a0d2206df0e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_cut_TestTraceback_f_g_h.test_traceback_filter.assert_len_ntraceback_": {"doc_hash": "8e29027dab30779a97fd099406e174e8f6de8f68a23704fa7427248b881473e4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_filter_selective_TestTraceback_f_g_h.test_traceback_filter_selective.if_matching_.else_.assert_len_ntraceback_": {"doc_hash": "2fce5c9164150b23b02ec60465dc5f3631289f2623132b5829d24376909ab41c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_recursion_index_TestTraceback_f_g_h.test_traceback_messy_recursion.assert_excinfo_traceback_": {"doc_hash": "99a42ec02f2d6ce841110044c88870c99b030d5bbdf1afbae85f9a34b4a4539d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_getcrashentry_TestTraceback_f_g_h.test_traceback_getcrashentry_empty.assert_entry_frame_code_n": {"doc_hash": "e510e9a4ef8f0d8ba4685f1ac666a82b8f9487886f606db1dc50f4f9574b005d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_exconly_test_excinfo_no_sourcecode.assert_s_File_str": 
{"doc_hash": "10b29175fb211f37eaaa923f2b42ddc3b2e676353c6ff0f50ed1f1f83286febf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_no_python_sourcecode_test_excinfo_no_python_sourcecode.for_item_in_excinfo_trace.if_item_path_basename_.assert_str_item_source_": {"doc_hash": "50b67102e0f67197795d6dfd5062bbb69224cad6320cbd54817487eb96555a4e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_entrysource_Queue_example_test_match_raises_error.result_stdout_fnmatch_lin": {"doc_hash": "762fde2c88c72b7414cbd5ff580a684dbea65796013803007ca287f2a0794d5c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo_TestFormattedExcinfo.test_repr_source.assert_lines_1_": {"doc_hash": "5e8c2eef43d0b54a2e079305b6787e5513c508935e6fee3fab84c83313869e40"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_excinfo_TestFormattedExcinfo.test_repr_source_excinfo.assert_lines_def": {"doc_hash": "424b0d399c31cdb6d743e4d602bac7c3cbcb22c6c8a759c8fcfa5dd128cbc90f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_not_existing_TestFormattedExcinfo.test_repr_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r": {"doc_hash": "b2f3e70b955039ade48100411d764088d4ccf3fb7509407deec8b4c0b16a4b6a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_many_line_source_not_existing_TestFormattedExcinfo.test_repr_many_line_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r": {"doc_hash": "5887e28c1a44cc6e17a7a199f04b954e0c2e661b534982fb26407e7ef288cd8f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_failing_fullsource_TestFormattedExcinfo.test_repr_source_failing_fullsource.None_1.assert_repr_chain_0_0_r": {"doc_hash": "6cfca8024058eed64dd4e8c61a243701eba6ffe9f85d5b1baac80234ef5a64f7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_TestFormattedExcinfo.test_repr_local.assert_reprlocals_lines_3": {"doc_hash": "b4dfb615bc33880ae85b4c7cf0fc9ffd319c38968c9b66d93f3270a8098d2904"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_error_TestFormattedExcinfo.test_repr_local_with_error.assert_NotImplementedEr": {"doc_hash": "36b4f4b927163a1be08e9f800def1dd46e2f483e8ea5035232ab71fb8ae07e5c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.assert_ExceptionWithBro": {"doc_hash": "af3450ee464169aee8c30558d947f003b8adddedbfdb21daf8c2eb1ed44cceec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_truncated_TestFormattedExcinfo.test_repr_local_truncated.None_3": {"doc_hash": "96a34676e5a77720678b5ed4a734d38a23290f4703b32fe6f135a482cb47c930"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_TestFormattedExcinfo.test_repr_tracebackentry_lines._assert_loc_message_": {"doc_hash": "ccc3a63c7a10ae5f184a3249849c065dd22ac32d5a03d990193298c1084abd77"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines2_TestFormattedExcinfo.test_repr_tracebackentry_lines2.assert_tw_lines_2_z_": {"doc_hash": "d0d5eb957bc68bf2ef7fab00c938b6760c0313797b84e349765203df12860082"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args.assert_tw_lines_0_x_": {"doc_hash": "2430d302b7e81530132350de314d888b878d06a62f3ce60521a7f431d8c1b497"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_short_TestFormattedExcinfo.test_repr_tracebackentry_short.None_6": {"doc_hash": "6c899394501c8fafd61437eb0ef5aeac0bf8c31ef87e04c2af4d9af1f15de591"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_no_TestFormattedExcinfo.test_repr_tracebackentry_no.assert_not_lines_1_": {"doc_hash": "57495416c3b2cb6889297fe8e0f3185f666473fe6628250771c22ec927e9848c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_tbfilter_TestFormattedExcinfo.test_repr_traceback_tbfilter.None_1": {"doc_hash": "48c86ec57156e68d3d2652b275f4b981517c898fa7fb60f3ea881374f8a97852"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_short_no_source_TestFormattedExcinfo.test_traceback_short_no_source.assert_last_lines_1_": {"doc_hash": "301d9d8bd215ac41cc7e957e61f652f03e8904da0d6db79c526cdc2d8ec45b9e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_and_excinfo_TestFormattedExcinfo.test_repr_traceback_and_excinfo.for_style_in_long_sh.assert_repr_reprcrash_mes": {"doc_hash": "c063ecaebff2b1fa3f4d9951859e907e07f6fa33a85e4381908014c834eed6af"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd.p_repr_traceback_excinfo_": {"doc_hash": "4ce9825b1ce3d0ec7485d8d4d993f43d0899b6cad59bb5b2000a95d8fac19fc9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_excinfo_addouterr_TestFormattedExcinfo.test_repr_excinfo_reprcrash.assert_str_repr_reprcrash": {"doc_hash": "091dc3f4a7f092d72f35e0d7c90c0ab7e6d62275273af6d0f98ad8f42cdf8a3d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_recursion_TestFormattedExcinfo.test_repr_traceback_recursion.for_style_in_short_l.assert_str_reprtb_": {"doc_hash": "6a1a586c66455ca194fdc526cc081efa1d00b00ecb494f753feca4eafbf5eaa2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_reprexcinfo_getrepr_TestFormattedExcinfo.test_reprexcinfo_unicode.assert_x_u_\u044f_": {"doc_hash": "dd954d0a30a26cad6e67cc9953962b828324c0da4b4b5a6215de86ff744f1d2f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_TestFormattedExcinfo.test_toterminal_long.assert_tw_lines_12_": {"doc_hash": "00f0ef273490330fc4b9be5fddea0a0a84c5437d8fbfcafa8c55a1881fe5cf7a"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_missing_source_TestFormattedExcinfo.test_toterminal_long_missing_source.assert_tw_lines_10_": {"doc_hash": "380afb95c8e445dab329f0214eb22bd6e4661a47d15beb752f99f0c167377e31"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_incomplete_source_TestFormattedExcinfo.test_toterminal_long_incomplete_source.assert_tw_lines_10_": {"doc_hash": "0476327263270fcfb3fb12764cc53c16e20386435a97b3fd815d2b09d7fe6fb7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_filenames_TestFormattedExcinfo.test_toterminal_long_filenames.try_.finally_.old_chdir_": {"doc_hash": "f9fe4302bd3a418c51bdecdf3ca2fd7d3e10a02b95f08d02142e7a4c591a49b0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_format_excinfo_TestFormattedExcinfo.test_format_excinfo.assert_tw_stringio_getval": {"doc_hash": "cbcc1897856f5f21aee4b8ea0fe9d4f3a65aa3e1ddab34f15e7393327c809040"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_repr_style_TestFormattedExcinfo.test_traceback_repr_style.assert_tw_lines_20_": {"doc_hash": "12424ab80405b99ca09efc35624ee2fc6a0698a32f0f4e0c9ceb1d5e1e923f9e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_TestFormattedExcinfo.test_exc_chain_repr.assert_tw_lines_47_": {"doc_hash": "7a97fd2af40b3e2b84a7921bba23c37707fc6e0345c12bca88451b86968b9a19"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_repr_chain_suppression_TestFormattedExcinfo.test_exc_repr_chain_suppression.assert_len_tw_lines_1": {"doc_hash": "3ec9a916e8319052c6c6cb05fc22fd79ca14be4c6e6442be0669b7520a1f6914"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_without_traceback_TestFormattedExcinfo.test_exc_chain_repr_without_traceback.matcher_fnmatch_lines_": {"doc_hash": "40c2475699733222db784f593f563e8941f12b0a718978cf72743c9797b4fc4b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_cycle_TestFormattedExcinfo.test_exc_chain_repr_cycle.assert_out_expected_ou": {"doc_hash": "04ad03053a043ca509e539cf5fbc603c387b3067e7c6e0c31cb242cb8e306cee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_repr_traceback_with_unicode_test_cwd_deleted.assert_INTERNALERROR_no": {"doc_hash": "1645a3b71626a30e7fcae85b4373f7cdefe53af7686a7e2aaa8f0024ac8e7c61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_exception_repr_extraction_error_on_recursion_test_exception_repr_extraction_error_on_recursion.matcher_fnmatch_lines_": {"doc_hash": "2d46088c67e4013d4399240eaa626bb915d3ef9c2ff14fac7e3d76af61a56f77"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_no_recursion_index_on_recursion_error_": {"doc_hash": "06c38871210fcfe708c25e124e7bc7f724966436ad8efc730cdd53b063be4ca6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py__coding_utf_8__test_isparseable.assert_not_Source_chr_0_": {"doc_hash": "75618fe24e34351f50b40fae5f329a0f857815089dc50f5bf5f53b344e43f3ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestAccesses_TestAccesses.test_iter.assert_len_values_4": {"doc_hash": 
"248446825830905a36a65d4cf78b22a17d5f79e36ad4b6cb017be98c75fac964"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling_TestSourceParsingAndCompiling.test_getstatementrange_triple_quoted.None_1": {"doc_hash": "096955278a968d81a81d556a280bc1aba640e1515a5dc1d5726443a8399a1b98"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs.None_4": {"doc_hash": "69740e4eb7195857850efb1c7a5d5e026d5cc7d50a9eec310112bb5c56fd4a67"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_bug_TestSourceParsingAndCompiling.test_compile_and_getsource.assert_str_stmt_strip_": {"doc_hash": "17917260eeffbdad68a7dff4606a478b62da0f718c78e4ad43318f3f0efced53"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_compilefuncs_and_path_sanity_TestSourceParsingAndCompiling.test_offsetless_synerr.pytest_raises_SyntaxError": {"doc_hash": "56f847ee8a7becbd51db44a825f959be75ff584bae69ce434cae8e8ce3d6b7da"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getstartingblock_singleline_test_deindent.assert_lines_def_f_": {"doc_hash": "df44f348090f9de8bdb17a64e9902ffb1c1ae74da787343602bedb9f4d51f1d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_source_of_class_at_eof_without_newline_test_source_of_class_at_eof_without_newline.assert_str_source_strip_": {"doc_hash": "db7778aeb17349fbaac2de3dcd233b2def2fb250279e7377c65b96147a030ca7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_if_True__test_findsource.assert_src_lineno_": {"doc_hash": "83c7aff0bfe4199caf01033b4d58e22fd8a5881f45fe26b95207f9b527524451"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getfslineno_test_getfslineno.assert_getfslineno_B_1_": {"doc_hash": "e521a89c23d77f4d3adf23e8fbed778aa190a384478a89a2b28c56dbd6b6ad1f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_code_of_object_instance_with_call_test_oneline_and_comment.assert_str_source_ra": {"doc_hash": "4922d4bb5a8e11d21287e0edca31a7b30db4156497b9b807dcf1e3ebe1c54661"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comments_test_comments.for_line_in_range_tqs_sta.assert_str_getstatement_l": {"doc_hash": "cdd70f73619130f0427e6bbb571eaddddbfa85a277a16161af1c5ea62ad5dd6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comment_in_statement_test_multiline.assert_str_source_ra": {"doc_hash": "5fe6f1cf61d790203d412565d28fa04a3d0216c7143c5bac1e3050638aba8ab0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestTry_TestTryFinally.test_finally.assert_str_source_": {"doc_hash": "078b61fa9c58779db87011f2934567b03556e34ab4baae0b2f0d3aa0ddc6d5bd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestIf_TestIf.test_else.assert_str_source_": {"doc_hash": "1ea76db5f9c69881f8915e34bc3b4a4c14b41ec1ecb93dae731648f646e2a645"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_semicolon_": {"doc_hash": "118b86bbb13b15d453edcf9da628670f412076b51bd1b6d989d0fc7b8c5e1e26"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/conftest.py_pytest_": {"doc_hash": "71aaaa4d02f959b971e1e24207d5fd95b884afdcc84db8e919bca187a3cc00a7"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_from___future___import_ab_test_getfuncargvalue_is_deprecated.pytest_deprecated_call_re": {"doc_hash": "accb20d9ab9d7850d6bfbe9c2b716c69fde4b7aa0df08e4baf8add9392898d6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_resultlog_is_deprecated_test_resultlog_is_deprecated.None_2": {"doc_hash": "84a1516206d184edd0ab343ac7b267b7cfc133aacb84955996c5861940f2fab6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_terminal_reporter_writer_attr_test_pytest_catchlog_deprecated.res_stdout_fnmatch_lines_": {"doc_hash": "c87818c80c591cd40a9e0744b034606039c47ffd827a62fbcff139afcc505565"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_raises_message_argument_deprecated_test_pytest_plugins_in_non_top_level_conftest_deprecated.res_stdout_fnmatch_lines_": {"doc_hash": "a1f6c37d46316535c42093dc6730ccb0fd1a72bfc0df9fef726a1c01eba03c68"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs.if_use_pyargs_.else_.res_stdout_fnmatch_lines_": {"doc_hash": "f82afd8c5e593156bc5b4dff9ac2b58e8ad4a80dfc1b6e4a750249d96c49768b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest.res_stdout_fnmatch_lines_": {"doc_hash": "7029e19677775891c7e6e022745003488edad915a685daae418f9a6c6ae1d3fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives.assert_msg_not_in_res_std": {"doc_hash": "843d2c472155de62349d606a66d2d2e90108a0dbd1eae088cf996c854a2890d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_fixture_named_request_": {"doc_hash": "61e7ec3e2e216eec33342dd8652fb292d5b22313d46eea2b85a3ce9f9a7ac8a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/acceptance/fixture_mock_integration.py__": {"doc_hash": "51ea49a0c199cc059792f7feb492c0897d4d0e520a6e13983e04f3327e046389"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/__init__.py__": {"doc_hash": "e1010179058c35b49cb1350eef9a8137eac31b5b0a9ea9bfdc10a0e80aa9f8f7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/test_foo.py__": {"doc_hash": "bbcad48462d53c52334f66bf59198a68e4b4db8573509cee47d932798de8c643"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/conftest.py__": {"doc_hash": "ce995b3b863c4dc07d536e02513c34b375cbbe4ad59cf40b3a27d7fcb36d29f0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/tests/test_basic.py__": {"doc_hash": "125256bc1fd9fcd280149f328ca7b0557a60216ba910b19e9f0adf6c19f6d607"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_init_given_as_arg/pkg/test_foo.py__": {"doc_hash": "0391e5d76de9eba041e4bc6abfd4fe8775c1b5b876c21b9b684ec894fcb02e82"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/conftest.py__": {"doc_hash": "263eadeb9eb3cf0ae2ee6b3bfd4100bf82bc26e929e2f3bced86731c701ea1a3"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/test_foo.py__": {"doc_hash": "2024a258b6e7a1fc7f06cb86356c11245ac5606cd2c6b6f531b397a3170e2937"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/conftest_usageerror/conftest.py__": {"doc_hash": "e9351af720cd748b5a6200fa40a6c359fa19f790655e0390ef538915c92b0aeb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses.py__": {"doc_hash": "8960c8ea0cdc93749dcb2b8555a60148999da8118f86711ceba7438a7b7892b9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py__": {"doc_hash": "5c880c755fa867755c01cb8d5b50c74ef8eed5a5a262d71bb87ba268510caa71"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py__": {"doc_hash": "b314d2723c53acb636be62e2d7a598bc97b7af6a2bfdba8d119b026d398e1008"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py_from_dataclasses_import_d_": {"doc_hash": "c3118bd6c8574f1e32c3ab24e961c069fed29d082583b7c003904f491b4772ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/deprecated/test_fixture_named_request.py__": {"doc_hash": "bb89328826673e8ecb6e2dcf5724f164899ca174ba0e90c5e6b8d4fa0f593ffc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/conftest.py__": {"doc_hash": "e991c1abdb37c55e5f7d64c6d4b30ab517bf9a85c27ff100a314a94cae09d1f7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/foo/test_foo.py__": {"doc_hash": "0a4c892a2c72aa1fbd971fe4cdfe996a0be5201ef00f3d4fb8097342f565d5b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/conftest.py__": {"doc_hash": "20b5fdc42c45a66ba2b76e874bf42c972b6b6cf650d1dfe102586e9ca23c702b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/test_in_sub1.py__": {"doc_hash": "9b7af6b8000886125b6b5b2b239934933d05d2852756c64a32927ef876c718e7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py__": {"doc_hash": "fcf604761694f72d7ebd5d9c2357f3f434ca2b564e4a3a06e1c9255e36bdf913"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py__": {"doc_hash": "53a540e364f100a1cfd8bc612dacf49a6e84ac53d787d9c9f294fbb5fd7fb675"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_detect_recursive_dependency_error.py__": {"doc_hash": "3820025a84873d1209b896c2a9998a0275bd7845183c513a21a543ab7cd43b25"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/conftest.py__": {"doc_hash": "367fe16779fbb95e5b97cda56271d3f328a09a5207dc0e5a93b3e9dc0d16999f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/conftest.py__": {"doc_hash": "b078b4f796651a003942d7456a5694db4ed7ea2036c5e6faa29d233000917215"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/test_spam.py__": 
{"doc_hash": "82692092d3dbb84d6f6d697f8aa373e6a374c5c70d82eb451371dd38cc222e36"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py__": {"doc_hash": "c46928dd491ccaba0d93e2e3ab0ab336eabffc6fe7191926a563175724e3041e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/test_extend_fixture_conftest_module.py__": {"doc_hash": "2e8ec5219d8bd462d1bef18f904209be5ca64738f423ca6e1ae939e1b93d2291"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_module_class.py__": {"doc_hash": "3d55f79452594c07bf951ec163fbae0ac2595d7c0f876acaa12e2bf970fc5446"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_basic.py__": {"doc_hash": "94dc812e2afc7ddebfad9a8b78316f72902c9f34ae645f00f585331d8e343741"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py__": {"doc_hash": "f5c43723f21269ef8fa5ee6bee9bdc23fdac706cf31ba02924becf016569df06"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_modulelevel.py__": {"doc_hash": "91f5c8ef5c8ce872f8112056f4640058edb595b24342368bd4db36afde75f3f5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookupfails.py__": {"doc_hash": "e4a9bca2018f283496956fe36dee4d0ae4b4054c81911ff399a6806d13790759"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/test_getfixturevalue_dynamic.py__": {"doc_hash": "60dffb7417f0cdb1ed8098c68bdab80f45e659b96e27385c4ff8584c01096759"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/conftest.py__": {"doc_hash": "32d070c1054ed7782c6b93db502d8512e4c04c07ecb2cc05ff6911cac3c78f60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/test_hello.py__": {"doc_hash": "af3329e0ce140e32d140a87182ed4fd82a88564df3a2b084ead8a0101ac09979"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_pprint_checked_order.assert_order_": {"doc_hash": "347c8e8f6afc007c280f7027371479869c7483598ccb849a630408a2608d36da"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_fix1_": {"doc_hash": "bc482f95b6daf2e26c7b40777609eb1d110334ea83f5241d3911ab59eefe8342"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/marks/marks_considered_keywords/test_marks_as_keywords.py__": {"doc_hash": "7c77f1259e2b41499c5af40a1a7c8bbae1fe5d653cf83064c21d922ac5f4f35a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/generate_folders.py_argparse_": {"doc_hash": "4a5a6f976e22616f3ece5b07b5af400437011642dfd0406bd7fe226a5aa2acf8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/template_test.py__": {"doc_hash": "603fa8a847aa9745ac76ad96c72d7aadb9113c5ac7c9ae0c55568e17fdd906ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/tmpdir/tmpdir_fixture.py__": {"doc_hash": "003a709becb574b93ebdc109d56a834356e5a9f38d77bc98346a12b5a3d81a33"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_parametrized_fixture_error_message.py__": {"doc_hash": "69a0fd86fa6961111d5a8ddb09daee04e04b445cb34e6580df75ad4b3b39ebca"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip.py__": {"doc_hash": "254add6ae9f26e9dc47206fd37045ae952a14ef4fe51aef533f67af370ab295b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_class.py__": {"doc_hash": "516e9c64d1de8d3e6f66ea9bee90b6d7b690f208e759e4a49bac77b8ffb63934"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_module.py__": {"doc_hash": "57f6bd9a8405abbd059ac5a66d4eb1859909ce35ef0f8dedc956fb1fef48298b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/warnings/test_group_warnings_by_message.py__": {"doc_hash": "c52c2b2075f85db484a5a65c674b854a731acb2fd4cada985f61848436dfabff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/examples/test_issue519.py__": {"doc_hash": "6d8cc278c5ae87dc8821f3ec88bdb32be76b4a843f6d1696702be398a26e8b65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/create_executable.py__": {"doc_hash": "3821f3787e5c26d694c652b6d8451083de4920567c12c4c760c6833865fb27de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/runtests_script.py__": {"doc_hash": "5e97474e70cb25fd350556bafb769ba7f1b930702f5d8ae4dc0acb3b93d795cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tests/test_trivial.py__": {"doc_hash": "6a70f07ec06ad1917b2d31d27f2a20012c3fd9900501591469631cc505ec6670"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tox_run.py__": {"doc_hash": "89283e1252a88b0ca47d3d8334e1c98c1cc71d50c87313c58a66710c40e12594"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py__coding_utf_8__test_maxsize_error_on_instance.assert_s_0_and_s_": {"doc_hash": "d0bdea86b657aa8f2fd255556a013a16b76462a62898561e4e6080a8b897121e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_exceptions_test_exceptions.assert_unknown_in_s2": {"doc_hash": "e7aaaca4ecbc0501773b85663608d7e173db292cede59576b6a66cf99b126835"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_big_repr_": {"doc_hash": "5edf8f1801ea11e032cb7a4327b5b0276ea5b31d4ae26ffb0825a8cce780f5c0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py__coding_utf_8__test_change_level.assert_CRITICAL_in_capl": {"doc_hash": "147d13f13b0b02e5408f9ae25bb0987b36fd1a21b49321e96013a251ed206a5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_change_level_undo_test_change_level_undo.assert_log_from_test2_n": {"doc_hash": "a666ce71699cd9b5cdc13bd0c67933860900b3622fe8ba07a96e8e40afcc824e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_with_statement_test_log_access.assert_boo_arg_in_caplo": {"doc_hash": "2bf2761222a7b2e04a001c48a8e66d52fbff284a16899cbc89afef978503af20"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_messages_test_messages.assert_Exception_not_in": {"doc_hash": "5b56ad2aef3b51899c86deff20b57849de06f407d09bcf168f6eb66112b30ab4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_record_tuples_logging_during_setup_and_teardown.assert_x_message_for_x_i": {"doc_hash": "429b479647a9caaea0af5e50fa10fa0295eb68a9b209fdd904f6a4003fb1c9b4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_caplog_captures_for_all_stages_": {"doc_hash": "08b0dcca1701060b692d893927c582e0c25d0053b580b741ece59721ae5e1e34"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_formatter.py_logging_": {"doc_hash": 
"864b73ada928f99fdfdffd78a229f855e4ca74251e400bcd9074a7a662b32cd7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py__coding_utf_8__test_nothing_logged.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"doc_hash": "70c7d8c67e701472ef8848f853e49295a4b36b6210ded5f125b94317326a310a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_messages_logged_test_messages_logged.None_3": {"doc_hash": "8730f84cfe37a69aa2e7ef60d00663ec33a701485fe3e76dbe459fa82332bb55"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_root_logger_affected_test_root_logger_affected.with_open_log_file_as_rf.assert_error_text_going_": {"doc_hash": "b403eefecdf19b6e67a586f5d39bf29e2e1f88dac0b0645b7e81bd2669fd721a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_log_level_interaction_test_log_cli_level_log_level_interaction.assert_DEBUG_not_in_res": {"doc_hash": "b5ade4f639dc7902fdecc2643caaae0cdc316f4d17a92e089efef06110c31926"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_setup_logging_test_setup_logging.result_stdout_fnmatch_lin": {"doc_hash": "8c8d3f7893273a2f0ef13d6de0dabbf1786b269d38a32f4e84f3910d5a2be631"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_teardown_logging_test_teardown_logging.result_stdout_fnmatch_lin": {"doc_hash": "9082f421495cfe3894baf93c974b0db16a4c67159694c7fe779a9b1dc6d2feb9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_test_disable_log_capturing.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"doc_hash": "44a00e73a37683b1da1ad37299f0ff66e14b5e2b57f94477d29f791b33a46c2a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_ini_test_disable_log_capturing_ini.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"doc_hash": "4aeeabb46740ccda2d37cb328db5bc8bcd6187c817cce2fd81d3e2c8bfb0ceb3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_enabled_disabled_test_log_cli_enabled_disabled.None_1.else_.assert_msg_not_in_result_": {"doc_hash": "8583756bdc4b889540bdcc5d41b875b18836a7bf6a6a400a5c1c921778ae2273"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_test_log_cli_default_level.assert_result_ret_0": {"doc_hash": "4719bb487b58d490878330a110c56d4b44b33cb3c5054eb60e073a62c059ae5f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_multiple_tests_test_log_cli_default_level_multiple_tests.result_stdout_fnmatch_lin": {"doc_hash": "66553b6bd820887afb3726d6d774f5fbecca635d6089920d71f77adbad9c735e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_sections_test_log_cli_default_level_sections.result_stdout_fnmatch_lin": {"doc_hash": "1ee6dba10842bdb9e5859a254a658e74d4b86412afbb9d434435214df004f348"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logs_unknown_sections_test_live_logs_unknown_sections.result_stdout_fnmatch_lin": {"doc_hash": "56e7875908597891d71cc037104e2491977e1786a7766db2ad2c2dc1f4077e5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_sections_single_new_line_after_test_outcome_test_sections_single_new_line_after_test_outcome.None_1": {"doc_hash": 
"ee1bd820cae91e3356b6d24f4274576fdeeced4826597181281f354a329fc15c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_test_log_cli_level.None_3": {"doc_hash": "be9b3d80ae47f7d2caa2259c437289def607a5a3466c1f95427dc377c82caee4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_ini_level_test_log_cli_ini_level.assert_result_ret_0": {"doc_hash": "05a53236bff2956992dad24153eb72572a04e6879e38c01fca7f90583368bfef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_auto_enable_test_log_cli_auto_enable.if_cli_args_log_cli.else_.assert_WARNING_not_in_s": {"doc_hash": "ec4ce732e1bfd5bfce08a46f5b3b40130b462805e01b6647f370e13901679c74"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_test_log_file_cli.with_open_log_file_as_rf.None_1": {"doc_hash": "9055a368b4eee7949419f4ee7a9fbd0e868ae008c758863484511eb3ccae2e7f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_level_test_log_level_not_changed_by_default.result_stdout_fnmatch_lin": {"doc_hash": "99c99504881606050d396998cce20fa06fbe8eda0589d3622e4ca4f50858a5b4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_test_log_file_ini.with_open_log_file_as_rf.None_1": {"doc_hash": "bf6d930f170400c883089e6c4228a1b987b94c9cfb48d36a280076c4bb05f16b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_level_test_log_file_ini_level.with_open_log_file_as_rf.None_1": {"doc_hash": "590e173d70b095dc78c89211072a67603e0490df4c385107a6a9624aaba9ae14"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_unicode_test_log_file_unicode.with_open_log_file_encod.assert_Another_normal_me": {"doc_hash": "858d82560eb41eabb125f5f4fe3dac8ab51c529aa5bc0143e0ebf02be127fc8b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logging_suspends_capture_test_live_logging_suspends_capture.assert_out_file_getvalue_": {"doc_hash": "f4c4613783f477dce7c162068b685463be2411d1417cc1cb49083d4b375a4915"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_collection_live_logging_test_collection_logging_to_file.with_open_log_file_encod.assert_info_message_in_t": {"doc_hash": "e7add4b9ac76e42ee7026b7b1472aeec27d1664241390b14e3330e7faaf6ba01"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_hooks_test_log_in_hooks.with_open_log_file_as_rf.assert_sessionfinish_in": {"doc_hash": "adc5efeba75f8bf6b7af3e2abf2d1ae2ebadab3489303cb918311e47dd34fc0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_runtest_logreport_test_log_in_runtest_logreport.with_open_log_file_as_rf.assert_contents_count_lo": {"doc_hash": "9e9375489ad30cd371f010aef03eddecb4c174943336f4c69b4bb9d0285cfbf7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_set_path_": {"doc_hash": "1b02c47244abcc9e1f50e3111ea73db7dd836b38e0b7687e654c8b579c7cac3c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py__encoding_utf_8_MyDocTestRunner.report_failure.raise_AssertionError_": {"doc_hash": "5c92227fc30072291857c157143195db80e42babc7c6eefe89efbe2ac8f63dab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox_TestApprox.test_repr_string.assert_repr_approx_a_": {"doc_hash": 
"d8aa21a5bb3ddf4409d03457bde3c0318a59536a5a2b06959b2c5a7b85372a30"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_repr_nd_array_TestApprox.test_repr_nd_array.assert_repr_approx_np_arr": {"doc_hash": "ec1f47245dc0f84b93d884225e8653324097b5fadbeee9cdb54b77b4beee5555"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_operator_overloading_TestApprox.test_exactly_equal.for_a_x_in_examples_.assert_a_approx_x_": {"doc_hash": "7e996983dbcfb542a4bf092c1552b08c1008234ac7d415dfea7d9ba5154c7376"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_opposite_sign_TestApprox.test_zero_tolerance.for_a_x_in_within_1e10_.None_5": {"doc_hash": "7212f19fd661394429259a213fc47869b9dee26c58f3dcc88a91affb2344259d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_negative_tolerance_TestApprox.test_negative_tolerance.for_kwargs_in_illegal_kwa.with_pytest_raises_ValueE.1_1_approx_1_kwargs": {"doc_hash": "f048dca76c4a5dede67c59e600c2c568e1bcab3a5547b150015e83e04077ce60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_TestApprox.test_inf_tolerance.for_a_x_in_large_diffs_.None_3": {"doc_hash": "7b9dc1713833a61796d830a59ce3ddcb9fbf6454a3041355d9c57b6b9bf9df7c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_expecting_zero_TestApprox.test_reasonable_defaults.assert_0_1_0_2_appro": {"doc_hash": "520a0a190c1bea41e9eb481b6976679bd368dd9874f3807df0666a455d3525c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_default_tolerances_TestApprox.test_default_tolerances.for_op_a_x_in_examples_.assert_op_a_approx_x_": {"doc_hash": "6babae1244debb6ba9d488725207adaaf5eec7073bdaea411805cd61617702af"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_custom_tolerances_TestApprox.test_custom_tolerances.None_11": {"doc_hash": "01c16635c391bb9f54e044cdfae7be47074fdd2b2bcd52c890dd0a12f0e3c50a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_relative_tolerance_TestApprox.test_relative_tolerance.for_a_x_in_within_1e8_re.None_1": {"doc_hash": "4ce14e78847932f48aa205e2d7861829c5c03cbca37b4178dbcfaae2d8da8035"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_absolute_tolerance_TestApprox.test_absolute_tolerance.for_a_x_in_within_1e8_ab.None_1": {"doc_hash": "36cdaec3e58f7f89862cddb1f86ab8ede59b6407006b7e35b89fc53d59c65dbd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_zero_TestApprox.test_expecting_zero.for_op_a_x_in_examples_.None_1": {"doc_hash": "3e33531f128c33eefeb03d089b71e01903969cea7193c228fbba415261af597b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_inf_TestApprox.test_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_x_na": {"doc_hash": "3561a91041810b9250f47a2f16f0eebede5f80a71a0a7407ed426eae3f2b2bc9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_int_TestApprox.test_int.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"doc_hash": "5dac6647136c1b4d2208bc4e4937c97f63e25d363740d3caf43b78af5305b658"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_decimal_TestApprox.test_decimal.for_a_x_in_within_1e6_.None_4": {"doc_hash": "31ab60dc7b43a3655d40b87510ec5eb897d6997c2db89ef2ef5bb7d6b426d7b1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_fraction_TestApprox.test_fraction.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"doc_hash": "ae2500b0d3cfb9e6751a395fdd2669ec61aa68c7543ff318a27a8e4c16d730b1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_complex_TestApprox.test_complex.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"doc_hash": "0c4d0ef63210541ef80eb5bef0ec9010ebfe0b32ae49f8c25331a92a113d8aa4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_TestApprox.test_list.None_3": {"doc_hash": "f211e328ca0da0a872283a8b70d4e292ef64c3734f29c8f9e2e157008bc41f31"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_wrong_len_TestApprox.test_tuple_wrong_len.None_1": {"doc_hash": "5735dd7a16f4465741aae90daa73fe2653d5ffe00ac4cbc6816de27acc5c4d46"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_dict_TestApprox.test_dict_wrong_len.None_2": {"doc_hash": "3d4c51880fde6fc76354e323ec3c3b10ac99c42a9aa37d1d7847248553b98eee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_TestApprox.test_numpy_array.None_7": {"doc_hash": "44e8f790820b70f1c2206a9c63f6945b05986a6d79423d409f80ea0ca817eb48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_tolerance_args_TestApprox.test_numpy_tolerance_args.for_op__abs__rel_in_tes.None_5": {"doc_hash": "fedd14caab3cb817b3377c645259250102c23294b07190154c6c33c4bcbb00e9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_nan_TestApprox.test_numpy_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_np_ar": {"doc_hash": "d442afa28ea729bfdd2f0fe49da11249bef89d7af86782e97e168c1ecdfa20ff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_inf_TestApprox.test_numpy_expecting_inf.for_op_a_x_in_examples_.None_2": {"doc_hash": "cf52a20fdb0d697016e34ea7b64ac790698dcbd0fb4cd7b1cff766e7d50b816a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_wrong_shape_TestApprox.test_doctests.runner_run_test_": {"doc_hash": "e214db8998148f5015e73b7850a22727046bca6af8a14751dd398ff9e638145a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_unicode_plus_minus_TestApprox.test_unicode_plus_minus.result_stdout_fnmatch_lin": {"doc_hash": "6ccb8e811fbaf15b01a9cd7b2662b782b06548f9c0fd00cfd92e2d3c2ce459a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expected_value_type_error_TestApprox.test_comparison_operator_type_error.with_pytest_raises_TypeEr.op_1_approx_1_rel_1e_6_": {"doc_hash": "583e16c6377a3d032672fb94de4fe5f4be2cd623df985ac867175a25cd4cbe84"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_with_scalar_TestApprox.test_numpy_array_with_scalar.None_3": {"doc_hash": "3164983e5c77d4c27ff9cf8badc34fd75f6386095d5dc125409f16acd01bfa3f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_scalar_with_array_": {"doc_hash": "a11075df17321a8eb9747145d7c92c07095ec11b5f72ddf64326fa38ac39a1c3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py__coding_utf_8__TestModule.test_import_duplicate.result_stdout_fnmatch_lin": {"doc_hash": "751f465b6c809d038ca8d9307d8208f9db9e246106953d83daa7f025b95d731b"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_import_prepend_append_TestModule.test_import_prepend_append.with_root2_as_cwd_.None_1": {"doc_hash": "0a2f879f267298b8d913f75ced40cb90fbba183efcc6adffa43035b80092d957"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_syntax_error_in_module_TestModule.test_invalid_test_module_name.result_stdout_fnmatch_lin": {"doc_hash": "88c98248f1e86dd6e54adc773e1add22ea8615ef46a5d25880a1707297d71b01"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_TestModule.test_show_traceback_import_error.for_name_in__pytest_o.if_verbose_2_.else_.assert_name_not_in_stdout": {"doc_hash": "56a13840d3d46a1b80d2042f41e5c0fa08eebc250dfc5f18d4cdaa67d71bd7b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_unicode_TestModule.test_show_traceback_import_error_unicode.assert_result_ret_2": {"doc_hash": "b2b810c1d3450925b315b883c789913dd1f4a714fbcde88aef9c4c7ab842c214"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass_TestClass.test_class_subclassobject.result_stdout_fnmatch_lin": {"doc_hash": "dd5c26b44fff6aa4b5ea729f36d2a7678b88288e36390528ae0b8b3ea4fdea5c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_static_method_TestClass.test_static_method.result_stdout_fnmatch_lin": {"doc_hash": "0f98a448a732d6b6ee2b8afdcf90cfd77840dbc2fbebede1376885c2ce50e8b2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_setup_teardown_class_as_classmethod_TestClass.test_issue2234_property.assert_result_ret_EXIT": {"doc_hash": "7f9a71ab8529ef1637f9f65f852b67b481b26240703af54ebc475eb738d1c7d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction_TestFunction.test_function_as_object_instance_ignored.result_stdout_fnmatch_lin": {"doc_hash": "c6cfba7d79892e6969ce9265878a056015bc4e8262adb4ebb265325097bbbeaa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.make_function_TestFunction.test_issue213_parametrize_value_no_equal.reprec_assertoutcome_pass": {"doc_hash": "a4fd5eb2d4e9de09c02b0d78669a84c4cba683a305194e5c4d6f53723b0a5955"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_TestFunction.test_parametrize_with_non_hashable_values.rec_assertoutcome_passed_": {"doc_hash": "f058949df9e3046fdeea0963f9ed145c32b7ccfb8cd448c443eea67b69f5f267"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_indirect_TestFunction.test_parametrize_with_non_hashable_values_indirect.rec_assertoutcome_passed_": {"doc_hash": "3e79e0439d27c8b77528d37d85de48150df060489f71658e74e167e92bb9eb3e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_fixture_TestFunction.test_parametrize_overrides_fixture.rec_assertoutcome_passed_": {"doc_hash": "fac729e2678c0e2b4861999b3de92bad537c8cae2f695f5b50211abcba50d25a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_parametrized_fixture_TestFunction.test_parametrize_overrides_parametrized_fixture.rec_assertoutcome_passed_": {"doc_hash": "82732d53f49f8609bbc8f76b0edadab5d96f5663d4552c81cd929651ab22202b"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_indirect_dependency_fixture_TestFunction.test_parametrize_overrides_indirect_dependency_fixture.rec_assertoutcome_passed_": {"doc_hash": "009775a73e39f113a486e9641e56f55ff7f9650d27a36118f73e4b2ab2ab6713"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_mark_TestFunction.test_parametrize_with_mark.assert_foo_in_keywords_": {"doc_hash": "36b28c24415944ab3f031ee3f6fa18bc4180d270d0948580904444dbeaa4c15e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_function_equality_with_callspec_TestFunction.test_pyfunc_call.config_hook_pytest_pyfunc": {"doc_hash": "fde3b8ba7c6c9d9199a5f43e740ce936d286a8c2d44de4af376ab9654a024bc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_multiple_parametrize_TestFunction.test_multiple_parametrize.assert_colitems_3_name_": {"doc_hash": "d290a0cafffd9e9c6f3425a8ba2e2a061e00f0cd2e21197288e068535922c5c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_issue751_multiple_parametrize_with_ids_TestFunction.test_issue751_multiple_parametrize_with_ids.assert_colitems_3_name_": {"doc_hash": "5b4e773db43e86cf1847a339aae21458e1779a3a9b638831acdd02c5a71bcf44"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_skipif_TestFunction.test_function_original_name.assert_x_originalname_fo": {"doc_hash": "bcf4990545021cf355f1082e7546412906a5cbe3ebe0ffd9df7c17eb9210da9f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting_TestSorting.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn": {"doc_hash": "f52b6946c41ddc8c5760e5d9dbc01d34a1cfffa990927c68d95774da7313441e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting.test_allow_sane_sorting_for_decorators_TestSorting.test_allow_sane_sorting_for_decorators.assert_item_name_for_ite": {"doc_hash": "a2618773780bacd1efad90efdb80cdf7839c56e78a1c4e8b8d8986701c6d7127"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization_TestConftestCustomization.test_pytest_pycollect_module.result_stdout_fnmatch_lin": {"doc_hash": "e43ceed4d8566665023baf610cac99d3694c83989daac856d37fb2e033224c16"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization.test_customized_pymakemodule_issue205_subdir_TestConftestCustomization.test_customized_pymakemodule_issue205_subdir.reprec_assertoutcome_pass": {"doc_hash": "73d284745ff639e2167fcef207145e21b1b94dc9c6b4ec0a11c7f22f816d4a6a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization.test_customized_pymakeitem_TestConftestCustomization.test_customized_pymakeitem.reprec_assertoutcome_pass": {"doc_hash": "7336eb8388922905ce110f4a7ffe646544e0a104ccb2546e00b02118cd3263f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization.test_pytest_pycollect_makeitem_TestConftestCustomization.test_makeitem_non_underscore.assert__hello_not_in_va": {"doc_hash": "dbcee7fb65ee63104247ef41b7088f1a21137ac681cb1140786d16c757eab68b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization.test_issue2369_collect_module_fileext_TestConftestCustomization.test_issue2369_collect_module_fileext.result_stdout_fnmatch_lin": {"doc_hash": 
"714f37207c6b9246a2d326856fea07906a8b6cb866cc5362ea79da06b7603db8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_setup_only_available_in_subdir_test_setup_only_available_in_subdir.result_assert_outcomes_pa": {"doc_hash": "c00a9887d60f7e38adf2db6491a9bd2f06bd9169e02d141d5c22d3e5ab0816b1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_modulecol_roundtrip_TestTracebackCutting.test_traceback_argsetup.assert_numentries_3": {"doc_hash": "2eada9f4ed5ae3352ebe2ed87b5bac1ba9760c3a5f81d93bfce17025861869ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_error_during_import_TestTracebackCutting.test_traceback_error_during_import.None_2": {"doc_hash": "9570434202c19f94dff84c14669d3ac26300f4e1b145bd546ab0b067cc947511"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection.result_stdout_fnmatch_lin": {"doc_hash": "8edfe0f5f085ccb790677686b7df58f4c1563373cd3853a6355f586d1930ce18"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_generated_code_TestTracebackCutting.test_filter_traceback_generated_code.assert_not_filter_traceba": {"doc_hash": "ef17564c35714ecc6749046de7138e1ec20331b97641b1bdb8cf703fb1916598"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_path_no_longer_valid_TestTracebackCutting.test_filter_traceback_path_no_longer_valid.assert_filter_traceback_t": {"doc_hash": "762aa0bb12263040ee909d10112f6c0bbe27ac2f7a7ea3f987f30496a1cb3870"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo_TestReportInfo.test_itemreport_reportinfo.assert_item_location_": {"doc_hash": "ab744634d5abaeaefee1c9f1693c89423d694035c7e2c1e4ebecdd1fcdde7475"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_func_reportinfo_TestReportInfo.test_class_reportinfo.assert_msg_TestClass_": {"doc_hash": "5288d6b6715c22a93e57b7b8f02c1d554fbf51f9172b4e3139fef93acb5e728e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_reportinfo_with_nasty_getattr_TestReportInfo.test_reportinfo_with_nasty_getattr.fspath_lineno_msg_ins": {"doc_hash": "6b2145d2a8410626289cc7f790d29eedffe0de797043fac702081b9f4b105334"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_test_customized_python_discovery.None_3": {"doc_hash": "6b7740cb101227e90ce00db9b168d608b8f39b5a2b5d2f023fed6afd3c74c180"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_functions_test_unorderable_types.assert_result_ret_EXIT": {"doc_hash": "d846ee47947f5fbda700c9b561bf7fb860c6d658e9ae5cc693fbc3ebb8d176c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_collect_functools_partial_test_collect_functools_partial.result_assertoutcome_pass": {"doc_hash": "a31513e1e5afd0222822cfc25534e6ed48ade044fc6639dfb71c7b26ab4791f1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_dont_collect_non_function_callable_test_dont_collect_non_function_callable.result_stdout_fnmatch_lin": {"doc_hash": "55afa191ab78dea3296b24b28d4dbe4e6d2823c209a2cd52bf042098d121670d"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_class_injection_does_not_break_collection_test_class_injection_does_not_break_collection.result_stdout_fnmatch_lin": {"doc_hash": "660c1658d61e955c2f5066cc4566ab98192f5c8486ff15d3345c0f8dbcfe2797"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_syntax_error_with_non_ascii_chars_test_skip_duplicates_by_default.result_stdout_fnmatch_lin": {"doc_hash": "4aea87f97ff60636f34edb9b26ecd6a516e00c36cde287b8c00109722de1c7c9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_keep_duplicates_test_keep_duplicates.result_stdout_fnmatch_lin": {"doc_hash": "1daf4bf99758cc16285c18a0e5dcab43c92fa612cc485ea53e544b4177dfad0a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_collection_infinite_recursion_test_package_collection_init_given_as_argument.result_stdout_fnmatch_lin": {"doc_hash": "096ff5a1c4a1dfb14d30471eb80cf9937c3f80209fdf9bb6ad32706e97400743"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_with_modules_test_package_with_modules.None_7": {"doc_hash": "a4e5717412bd8782243384547560a4afa502a454a58cc39b134efd120d7a5e50"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_ordering_": {"doc_hash": "befeb8019bd8332ce7105b0e14cfde967c705addf40588b8e66c3f015ac79d1f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py__coding_utf_8__test_getfuncargnames.None_5": {"doc_hash": "00fe61cb792705c90fd9d881c28fcd103795ccc74c7ba52b0852873cbb053ecf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures_TestFillFixtures.test_extend_fixture_conftest_conftest.None_1": {"doc_hash": "022320b31e32d1c4c4eef8d82369fa7de0ccd04a6bc861c4193d79b11748d645"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_conftest_plugin_TestFillFixtures.test_extend_fixture_conftest_plugin.assert_result_ret_0": {"doc_hash": "bfed6cd2b6f2f88518e023f7249defc92a11a5eb6ee958b44c905151298e9449"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_plugin_plugin_TestFillFixtures.test_extend_fixture_plugin_plugin.assert_result_ret_0": {"doc_hash": "ad20d01d078c6fefab180bb2814b7517af3ae202a97f6fee36873ff940c1bb18"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_module_TestFillFixtures.test_override_parametrized_fixture_conftest_module.None_2": {"doc_hash": "ddded4c3ed3532ba5513038280f437d6129e1206e213bf0ff7ed14b82582880f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest.None_4": {"doc_hash": "711ccd9b6d58744ecc67f350aadf92474542c27915360597990be37babdd2ae5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module.None_2": {"doc_hash": "3dfece2e7b9914358c8d72307541709f64294389e91d8ba1a26f90f132b9d89b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest.None_4": {"doc_hash": 
"92e1fbf92ba3b0b2b34f1b0420d836accc87105024464f11f22ed1be47e08972"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest.None_4": {"doc_hash": "04e43fa14aa19be6a1135780a9469f31471b0f041de3300d3dff4fb09a02f5e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_autouse_fixture_plugin_TestFillFixtures.test_autouse_fixture_plugin.assert_result_ret_0": {"doc_hash": "f34a4a55609ada236ad575cae1af45ec9247b23f08c3c10d8044da459fe6cd81"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_funcarg_lookup_error_TestFillFixtures.test_funcarg_lookup_error.assert_INTERNAL_not_in_": {"doc_hash": "1143861450bec8bcd794e3bf4924082ffb227d0fca0f9798c0e6541e21c68c7c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_fixture_excinfo_leak_TestFillFixtures.test_fixture_excinfo_leak.assert_result_ret_0": {"doc_hash": "9645b1dd396fb6d447749110edf90cfcd9a2b8d5c8dcb40ddcf0f3d4c9743461"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic_TestRequestBasic.test_request_attributes_method.assert_req_instance___cla": {"doc_hash": "65a725cc93c20ac048a06ca8144c73488be6cd4715e7050135127ef3f886f72b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs.assert_arg2fixturedefs_s": {"doc_hash": "9c66ee34c8bdcdcec7b42cc7c379b8ad3aceee0df5752c96f1cacdc98d8c26d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_garbage_TestRequestBasic.test_request_garbage.result_stdout_fnmatch_lin": {"doc_hash": "e3a0317f698187bba64ee07ecc157f5687944be51da8f4030098ff400791fd76"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_recursive_TestRequestBasic.test_getfixturevalue_recursive.reprec_assertoutcome_pass": {"doc_hash": "2f4bf0f667b427c09b30f272fc425029501f53e2eea2c97e9b57b9fd19b6acf8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_teardown_TestRequestBasic.test_getfixturevalue_teardown.result_stdout_fnmatch_lin": {"doc_hash": "805a376100869de842bb8085ff5fcf03c2e98e94cc1b25da2ce739bf3867f39a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_TestRequestBasic.test_getfixturevalue.with_warning_expectation_.assert_request_in_item_": {"doc_hash": "087dcc1f20e398436f795d0abe110d50f51fcdff4d35aa7f48d1459161e7e35c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_TestRequestBasic.test_request_addfinalizer.assert_teardownlist_1": {"doc_hash": "730d1ecd3aeef206851362fb8ca62bd11b0c5617e35658e9cb7bee868a09a54b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_TestRequestBasic.test_request_addfinalizer_failing_setup.reprec_assertoutcome_fail": {"doc_hash": "198e398ebc4a04f43b70b0e087cc7b831fa4c573365b0ddc93044fa487c77afb"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_module_TestRequestBasic.test_request_addfinalizer_failing_setup_module.assert_not_mod_values": {"doc_hash": "9cda612367cb70139ba4510733c12a9c75386cd1ca6923a9bb8b61b45831e640"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_partial_setup_failure_TestRequestBasic.test_request_addfinalizer_partial_setup_failure.result_stdout_fnmatch_lin": {"doc_hash": "98a694eb9e24b13d0545d079a1d59a917eb3c6c99b8419d943d3c71d055d5f46"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions.result_stdout_fnmatch_lin": {"doc_hash": "932c63853ace2526d7521347974996dac413215e50ee6ea88431125231277dd2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_getmodulepath_TestRequestBasic.test_request_fixturenames.reprec_assertoutcome_pass": {"doc_hash": "5d6e80e8067e8dca9b9364960a0cc06585382457b36b975fcb1ba20125479ffe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_fixturenames_dynamic_fixture_TestRequestBasic.test_funcargnames_compatattr.reprec_assertoutcome_pass": {"doc_hash": "86489d65079dcbcdb67f4e0fec926707a05db048b82192c8d162bde1330af2c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupdecorator_and_xunit_TestRequestBasic.test_setupdecorator_and_xunit.reprec_assertoutcome_pass": {"doc_hash": "9d03cbf4e5e56aa4233c2d7ff10469c139b42bc5277815d81554f44f9cb0df45"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep.result_stdout_fnmatch_lin": {"doc_hash": "45dfc9c564e0b82ee7386c1c8e1666056fedbb6f9cf6f4be7a58e8b5ce41e231"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_show_fixtures_color_yes_TestRequestBasic.test_newstyle_with_request.reprec_assertoutcome_pass": {"doc_hash": "85fdcdf58ee32a22e9538d414cb7cbdcf994a5276a8b391f2e845fd1452d329d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupcontext_no_param_TestRequestBasic.test_setupcontext_no_param.reprec_assertoutcome_pass": {"doc_hash": "79b17bd70e9549ee13846625419e4f47e474f10274348d5f03072efb659a163a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking_TestRequestMarking.test_accesskeywords.reprec_assertoutcome_pass": {"doc_hash": "6c405d7f00bb790260a6bbb2b23a339635791f87a0e4daaba342deb4654f8535"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking.test_accessmarker_dynamic_TestRequestMarking.test_accessmarker_dynamic.reprec_assertoutcome_pass": {"doc_hash": "16567f8479174e0788967eae7dbebaa3be6fa266b663ad778c58fe18cc503330"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages_TestFixtureUsages.test_receives_funcargs.reprec_assertoutcome_pass": {"doc_hash": "a9c9a89742bbb8a86c60628d0f9cdc7bb719f15ae85b4c98db8dba3e34f21921"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_TestFixtureUsages.test_receives_funcargs_scope_mismatch.result_stdout_fnmatch_lin": {"doc_hash": 
"70d8004e49dc1814976cd2697c756211bf35c5a7990ef8e5ce50f1311060acd4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_issue660_TestFixtureUsages.test_invalid_scope.result_stdout_fnmatch_lin": {"doc_hash": "0bb9f43942fe932d9f0af89d740c51897686adde6220aa687ff1e1f1b2853d36"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_funcarg_parametrized_and_used_twice_TestFixtureUsages.test_funcarg_parametrized_and_used_twice.result_stdout_fnmatch_lin": {"doc_hash": "d6bdc07f0eaccecb9071af8b57ae10151fe07a15a097e768fbd5c86001cfcb13"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error.result_stdout_fnmatch_lin": {"doc_hash": "c2068ef1d1ab45a6e7fb237c26b265054aaf2f7495ff3f7290ab31186157075f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_setup_as_classes_fails_TestFixtureUsages.test_request_can_be_overridden.reprec_assertoutcome_pass": {"doc_hash": "fe169cd937cd7e517ec987fd56dd89a44f68ee720a97b220266ffa9c37320146"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_marker_TestFixtureUsages.test_usefixtures_marker.reprec_assertoutcome_pass": {"doc_hash": "b654e568f3956f7c53bcf9420677933b373a4f2001002abb469f53c210d10424"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_ini_TestFixtureUsages.test_usefixtures_ini.reprec_assertoutcome_pass": {"doc_hash": "393c0513160f3ff1bf7ff5542f601f2c6997c86261338c5dfaca9d2300c094d1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_seen_in_showmarkers_TestFixtureUsages.test_request_instance_issue203.reprec_assertoutcome_pass": {"doc_hash": "df524391f6efa852bc6b7f94d03f5f05bc289d54b618fc7027f8711015e1a413"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_fixture_parametrized_with_iterator_TestFixtureUsages.test_fixture_parametrized_with_iterator.assert_values_1_2_1": {"doc_hash": "544145e42d0e9b880ddb7609f053fc9e274323b3b6c8eafadb943ba6b0f60783"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_setup_functions_as_fixtures_TestFixtureUsages.test_setup_functions_as_fixtures.result_stdout_fnmatch_lin": {"doc_hash": "c59c3f35dc6782129eb8b70fc989fe33ae2f980530491b107b445a29c66c9cf2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories_TestFixtureManagerParseFactories.test_parsefactories_evil_objects_issue214.reprec_assertoutcome_pass": {"doc_hash": "a4508bebf79aeb612258da0184a03909dadbdec083ba38a957a5c156a4745fe1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_TestFixtureManagerParseFactories.test_parsefactories_conftest.reprec_assertoutcome_pass": {"doc_hash": "9b0f2aa7bf14364ddf4f195c5839b099540df4c757e27cb838dfe5e18328f3bb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class.reprec_assertoutcome_pass": {"doc_hash": "56c10449a35dff9d4631257df8e97becea2590a7dabd1a78811f5814e2ada26c"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids.with_runner_as_cwd_.reprec_assertoutcome_pass": {"doc_hash": "934747df036015c8d82b8f40fefedcd9a1ce5f95fb1d9c0751887e005be4cc50"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_xunit_fixture_TestFixtureManagerParseFactories.test_package_xunit_fixture.reprec_assertoutcome_pass": {"doc_hash": "a3d941f63cc73a70de3c6ff2df1e142210374286e2583d1212e8d8f39eda1691"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_fixture_complex_TestFixtureManagerParseFactories.test_collect_custom_items.result_stdout_fnmatch_lin": {"doc_hash": "efba642fe3eebd0cc32074207943743d83f401e496b994fd4a0854f896e35248"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery_TestAutouseDiscovery.testdir.return.testdir": {"doc_hash": "bb6341836b32868a52325fd28793154d11a54d36703ef14f6bc8d4354e5bf902"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_parsefactories_conftest_TestAutouseDiscovery.test_parsefactories_conftest.reprec_assertoutcome_pass": {"doc_hash": "8e7df4845bcaf895e78313ce0e5e6b8154c5adc9a3bb23468eeac2bcff1cad0c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_two_classes_separated_autouse_TestAutouseDiscovery.test_two_classes_separated_autouse.reprec_assertoutcome_pass": {"doc_hash": "a865d6b267c94249979c36813af734012bcbbdaded6223ed60aa9fbc836d8a46"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_at_classlevel_TestAutouseDiscovery.test_setup_at_classlevel.reprec_assertoutcome_pass": {"doc_hash": "d2a7ad14858961a8fa3c1804f6810eff7f96908da53f018676b835084f70772d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_enabled_functionnode_TestAutouseDiscovery.test_setup_enabled_functionnode.reprec_assertoutcome_pass": {"doc_hash": "6af47767a018693450bfc7671b3359e9a2b435eba9a1e8e994445b80f84c16a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_callables_nocode_TestAutouseDiscovery.test_callables_nocode.reprec_assertoutcome_fail": {"doc_hash": "0d4aa8d2d9d5793405ad60c2f7ffa886453ba3e6382e5cfdc4a2b3975d563f92"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_conftests_TestAutouseDiscovery.test_autouse_in_conftests.result_stdout_fnmatch_lin": {"doc_hash": "333142ca30604ff570f57448bd786eebd6dca35978393fb12579c5733c871e2f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_module_and_two_classes_TestAutouseDiscovery.test_autouse_in_module_and_two_classes.reprec_assertoutcome_pass": {"doc_hash": "fdbc1d85dc0e8bb6a769de9ee9870425bab6313693d03cc9d8d1fe6b16758d09"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement_TestAutouseManagement.test_autouse_conftest_mid_directory.reprec_assertoutcome_pass": {"doc_hash": "eaec5ee1e97b7d8da4a008551339ea2dea5a15870ff1fb2876bf08cf98746da1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_funcarg_and_setup_TestAutouseManagement.test_funcarg_and_setup.reprec_assertoutcome_pass": 
{"doc_hash": "f13b5a1f1511ecd8f2479f5d049e5575e430e10f10d8770dccdae23574502c5a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_uses_parametrized_resource_TestAutouseManagement.test_uses_parametrized_resource.reprec_assertoutcome_pass": {"doc_hash": "6c911385edb3ad8cbcef655e74ded223dd47abaf17941da2521e9087c1eff541"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_session_parametrized_function_TestAutouseManagement.test_session_parametrized_function.reprec_assertoutcome_pass": {"doc_hash": "d6913e2556ca3341a0d735fc8370876fe72c91fd1ddae6e58c4a93a87428a6ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_class_function_parametrization_finalization_TestAutouseManagement.test_class_function_parametrization_finalization.assert_values_fin_a1": {"doc_hash": "987b0e6ac859936e95f34fecac8cd86754736a09e9d002baf1c7287d85427777"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_scope_ordering_TestAutouseManagement.test_scope_ordering.reprec_assertoutcome_pass": {"doc_hash": "76cd61fa7baaf64423cbe77b996cdcb6e0dd4eb9d1e19721366e107e00a21684"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_parametrization_setup_teardown_ordering_TestAutouseManagement.test_parametrization_setup_teardown_ordering.reprec_assertoutcome_pass": {"doc_hash": "c5aa9701369e406d8abd2ac4492bf63523261312cf1e76415fd7e26956d8c238"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_autouse_before_explicit_TestAutouseManagement.test_ordering_autouse_before_explicit.reprec_assertoutcome_pass": {"doc_hash": "ba27f51166463495694ca9da06d0f547ca8533ec67987b90c6de2cf4cf4649fa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_dependencies_torndown_first_TestAutouseManagement.test_ordering_dependencies_torndown_first.reprec_assertoutcome_pass": {"doc_hash": "2f99341569ff583281d22b04b154095e455105c5b10d2650989c3c2cca30f76b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker_TestFixtureMarker.test_multiple_parametrization_issue_736.reprec_assertoutcome_pass": {"doc_hash": "6faa9c6ef59dcb35480076e6cd286c255c29f945e3bd169bb87e7b93ed00ea05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_override_parametrized_fixture_issue_979_TestFixtureMarker.test_override_parametrized_fixture_issue_979.reprec_assertoutcome_pass": {"doc_hash": "2a26682e5b475c2dc488221e001f72b1830be99226ff0d0fccc89362233f11d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_TestFixtureMarker.test_scope_session.reprec_assertoutcome_pass": {"doc_hash": "cb032da8b5fc56896ce0a17c8bef90485578b7e796f436f419ce31c96b861c86"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_TestFixtureMarker.test_scope_session_exc.reprec_assertoutcome_skip": {"doc_hash": "803e294dc4968a6555687e929f368526c92ae08ae510dceca360e8d89c65ecaa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_two_fix_TestFixtureMarker.test_scope_session_exc_two_fix.reprec_assertoutcome_skip": {"doc_hash": "79e1cf961b984c8a315b6de7c75e2a1aac09e310a4cc24a0daec587feae311ae"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_exc_TestFixtureMarker.test_scope_exc.reprec_assertoutcome_skip": {"doc_hash": "78894a7423bd41134ea7f97ad0296fe2b54ff19683b96cc102815540e0f74790"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_uses_session_TestFixtureMarker.test_scope_module_uses_session.reprec_assertoutcome_pass": {"doc_hash": "db457322a9717ad0d9287ff5581d98e073b5b5e1479f53b7ccb007069f0b27ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_and_finalizer_TestFixtureMarker.test_scope_module_and_finalizer.reprec_assertoutcome_pass": {"doc_hash": "307ccb745720b77611700d4b5353e3aeaf5b63426c826fcdf5b00f0a57a6bd5d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_various_TestFixtureMarker.test_scope_mismatch_various.result_stdout_fnmatch_lin": {"doc_hash": "6a0c743c8e1fccfc73fc8fdfa87632c77d590f148316866d0baaee13551e99e9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_register_only_with_mark_TestFixtureMarker.test_register_only_with_mark.reprec_assertoutcome_pass": {"doc_hash": "fa8bfe1d4e57c6cc260b85d9649aac72c0bc22a89be0aeec6500b6d14460f70a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_and_scope_TestFixtureMarker.test_parametrize_and_scope.assert_c_in_values": {"doc_hash": "47c42b8baf10bbdcc852744210123331dbb36e83d7a4cef793f9d01335d8a95f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_TestFixtureMarker.test_scope_mismatch.result_stdout_fnmatch_lin": {"doc_hash": "d34dca44055b1fc0fde8c49a2c5e612a1a171050eae5ed30f87102ee66ebe57d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_TestFixtureMarker.test_parametrize_separated_order.assert_values_1_1_2": {"doc_hash": "21ec9b0864562e2703fdf9f1570c11d18ffe4e260d148f3c4d189f6265f259cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_module_parametrized_ordering_TestFixtureMarker.test_module_parametrized_ordering.result_stdout_fnmatch_lin": {"doc_hash": "3a060507f3e7b69f32c5ff4318c17f0ccf069435bfac4d0c35816d756d51414a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_dynamic_parametrized_ordering_TestFixtureMarker.test_dynamic_parametrized_ordering.result_stdout_fnmatch_lin": {"doc_hash": "dc34c85f11448c9671d2e54f735b8e22391c9fb63a8d447c200bdfa05ec545d0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_ordering_TestFixtureMarker.test_class_ordering.result_stdout_re_match_li": {"doc_hash": "fdfd0c2cdec86b771c55bad68d9927283c1723adc54c87d5b0148ed3094b7d48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first.assert_values_expected": {"doc_hash": "01b6814b3f0232bbf912595933984a190859daf4431ed339eb843b2d863675e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrized_fixture_teardown_order_TestFixtureMarker.test_parametrized_fixture_teardown_order.assert_error_not_in_res": {"doc_hash": 
"4cf7b17c3dea3f481f6b088b949c7076863d369662c235a2ce05deec6b06811c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_finalizer_TestFixtureMarker.test_fixture_finalizer.for_test_in_test_browse.reprec_stdout_fnmatch_lin": {"doc_hash": "71b519ff9ac5d7ce2b00e6f0968d7271cade50c99afd4dac7353da66f872a039"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_with_normal_tests_TestFixtureMarker.test_class_scope_with_normal_tests.for_test_in_test_a_t.assert_reprec_matchreport": {"doc_hash": "bd32c0c3abb77e330409b40c8c9d5dd235d3675ecbb0c2b87b73bdbe7339db9c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_request_is_clean_TestFixtureMarker.test_request_is_clean.assert_values_1_2_": {"doc_hash": "792e435aa973cfce75be421c22b10b3efa3aa2682bf77b80cf302874749a4a45"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_lifecycle_TestFixtureMarker.test_parametrize_separated_lifecycle.assert_values_5_fin2": {"doc_hash": "86265cb86e78c52e0b101a563e0362f6dcfbc50e5997094b3ac850b68b8f7378"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called.reprec_assertoutcome_pass": {"doc_hash": "78cc313b9fc5c47dcc15eb0e7d3d5bb825df06d6878cc88cf4908d60d3798d3d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_finalizer_order_on_parametrization_TestFixtureMarker.test_finalizer_order_on_parametrization.reprec_assertoutcome_pass": {"doc_hash": "6173272da3eed612c7bd7a8ac1861e6a3656b5ec5d73297578876f8e7d5bd200"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_parametrization_ordering_TestFixtureMarker.test_class_scope_parametrization_ordering.assert_values_": {"doc_hash": "e90b558f0045490f8a5a9ebc4d98994b978502c0f59164357a5d8d1a9ffa26e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_setup_function_TestFixtureMarker.test_parametrize_setup_function.reprec_assertoutcome_pass": {"doc_hash": "1f9a6f77b6df7a89155cdbbead431ad59d8235b75dc99a726c80c2d96b719e5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_marked_function_not_collected_as_test_TestFixtureMarker.test_params_and_ids.res_stdout_fnmatch_lines_": {"doc_hash": "fc32eb1dc2bffa27a16a531a96c29155209e30c0a7bb5ef3bffc2de72380ec73"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_params_and_ids_yieldfixture_TestFixtureMarker.test_params_and_ids_yieldfixture.res_stdout_fnmatch_lines_": {"doc_hash": "d71b6186b6089523b50f5f29e18d53360e34d4ac8c38d74cddbd133c60d112ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_deterministic_fixture_collection_TestFixtureMarker.test_deterministic_fixture_collection.assert_out1_out2": {"doc_hash": "5816a1a98ce1f4e9461e0e146a0a4e2ef360a847d79d765b97d3dce6f53558ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess_TestRequestScopeAccess.test_setup.reprec_assertoutcome_pass": {"doc_hash": "5e01709c2800b8bf1d53a33b6c85a7298e38d9efd0400b7543e21b4aad86d22c"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess.test_funcarg_TestRequestScopeAccess.test_funcarg.reprec_assertoutcome_pass": {"doc_hash": "afd47ca1b6b53aa00b42bf9a3912e0502fa5f29aca99da3130082f9db1bd489a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors_TestErrors.test_subfactory_missing_funcarg.result_stdout_fnmatch_lin": {"doc_hash": "a5435e252677678020305eca58098e8dbd0735913fce4f9069d01434edec5be2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_issue498_fixture_finalizer_failing_TestErrors.test_issue498_fixture_finalizer_failing.result_stdout_fnmatch_lin": {"doc_hash": "eaeeb58d9ac07de9f2155458c77a863278510188733ac8680fa1c99fef46e320"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_setupfunc_missing_funcarg_TestErrors.test_setupfunc_missing_funcarg.result_stdout_fnmatch_lin": {"doc_hash": "ccee15a53b44dcdaeb09583e89a96f8b1d9f34d61831145ab193a9da3ae496f8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures_TestShowFixtures.test_show_fixtures_verbose.result_stdout_fnmatch_lin": {"doc_hash": "22f95f17be2fc7846ac0a57138e54c469ce60fea3b6b8a588b2990ffa73cc417"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_testmodule_TestShowFixtures.test_show_fixtures_testmodule.assert_arg0_not_in_resu": {"doc_hash": "1847970ae5c11f8b35a886905c6a248e6dc6648328a7c566dac4ec4c853d1ba7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_conftest_TestShowFixtures.test_show_fixtures_conftest.result_stdout_fnmatch_lin": {"doc_hash": "dc3048e5fcbde75623cb5c813b19872360509870af7543339a6f5ba7ceedff8c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_trimmed_doc_TestShowFixtures.test_show_fixtures_trimmed_doc.result_stdout_fnmatch_lin": {"doc_hash": "c4c7a95a3f350b540f5e1ac7070d070f9245a6347fdc7cdb5e9f01e276813ae9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_TestShowFixtures.test_show_fixtures_indented_doc.result_stdout_fnmatch_lin": {"doc_hash": "9a0d52cddfa361064b23270788e3782ed19d0f57c8e701fc9a33a33827880d82"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented.result_stdout_fnmatch_lin": {"doc_hash": "a22bbda2df7ce2c6c1b74a5b29c317369e3deed9e738ba0dc403cbfa343ac8bb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_in_class_TestShowFixtures.test_show_fixtures_indented_in_class.result_stdout_fnmatch_lin": {"doc_hash": "7ae07f6ffa06a3c99b135fb38f84a0740021976097f08306ba9cecbf96953744"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_different_files_TestShowFixtures.test_show_fixtures_different_files.result_stdout_fnmatch_lin": {"doc_hash": "f294f450961d4ed487f858aa398e07f1293b52aae5c87d2ec0f88f78fd9cbb3c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_with_same_name_TestShowFixtures.test_fixture_disallow_twice.with_pytest_raises_ValueE.foo.pass": {"doc_hash": "51996818e71eb71f3568eab0e6b76637e9e0fffe5fd0e87cabfce71b96e33837"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs_TestContextManagerFixtureFuncs.flavor.testdir_makepyfile_": {"doc_hash": "2c192151d9f2201c4ffaf8045c08316b69657196f80208c805a85210d1745a77"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_simple_TestContextManagerFixtureFuncs.test_simple.result_stdout_fnmatch_lin": {"doc_hash": "63b3c483e160558d8f75f4c5d06680f773971ee884fbef9bbeec828355626553"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_scoped_TestContextManagerFixtureFuncs.test_scoped.result_stdout_fnmatch_lin": {"doc_hash": "40606d7bf430e6603efd9f9b3c778098e4ae10fa86919d4d9faf4c0e45b1fa5e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_setup_exception_TestContextManagerFixtureFuncs.test_teardown_exception.result_stdout_fnmatch_lin": {"doc_hash": "b01def44faef59c592078a52c7b50cd606624e48e7a32c96277d100568cb0a38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_yields_more_than_one_TestContextManagerFixtureFuncs.test_custom_name.result_stdout_fnmatch_lin": {"doc_hash": "d6f9e88442ae4e58efb2d60a118fd4ea31402a68fabaa156faba68f62e067a65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest_TestParameterizedSubRequest.test_call_from_fixture.result_stdout_fnmatch_lin": {"doc_hash": "d350b70f02ea2f8dc6e69f9e709b0ccb1c1513c8fcbb1b571742777ca46ec52d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_call_from_test_TestParameterizedSubRequest.test_call_from_test.result_stdout_fnmatch_lin": {"doc_hash": "1f636cf81173234a404847330a9a0b0f68e6adc7b9a38399e931dc98dffaa3c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_external_fixture_TestParameterizedSubRequest.test_external_fixture.result_stdout_fnmatch_lin": {"doc_hash": "ace4ae69e175fadfa45ebe60c87e6903a340a1ce9511404d20c58eda7ea33bdb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_non_relative_path_TestParameterizedSubRequest.test_non_relative_path.result_stdout_fnmatch_lin": {"doc_hash": "eaad9a4dadbb91432abc8db1a8a9c1982cb66f714b700b6861037127ec8b8261"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_test_pytest_fixture_setup_and_post_finalizer_hook_test_pytest_fixture_setup_and_post_finalizer_hook.result_stdout_fnmatch_lin": {"doc_hash": "a5a4adcb63df8d8d9e7ac294186f039ba03fd7e826a14ea9c5d531860357a2df"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering_TestScopeOrdering.test_func_closure_module_auto.assert_request_fixturenam": {"doc_hash": "3116fb448c521bd040faea78729dac321554c9fb417aace514aa51b994700fdc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_with_native_fixtures_TestScopeOrdering.test_func_closure_with_native_fixtures.None_2": {"doc_hash": "e3c29a8d6dc1347441803b866c868cd0262a9c48f758efd0ae41ad7187a84342"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_module_TestScopeOrdering.test_func_closure_module.assert_request_fixturenam": {"doc_hash": "6a888c44a1fc64b856caea4b1b2f2ef87e9027da8d5ee52194dabe64a39c13c1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_scopes_reordered_TestScopeOrdering.test_func_closure_scopes_reordered.assert_request_fixturenam": {"doc_hash": "3c31767cc3566e4834b331899d2a6ada8923a0629c28652ec16077df1be2cbbc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_same_scope_closer_root_first_TestScopeOrdering.test_func_closure_same_scope_closer_root_first.assert_request_fixturenam": {"doc_hash": "25319c0b2cdd9b34501dbe842b4740117488aa823fbbfaa84a6724161e250772"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_all_scopes_complex_TestScopeOrdering.test_func_closure_all_scopes_complex.assert_request_fixturenam": {"doc_hash": "6ba14466205ece52fd367f3cdf0d2a4530fd54a6f2d1105a55d19a516110a6eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_multiple_packages_": {"doc_hash": "54156a4cc6e36f1a030349101f506cdfa9161d00034d26c3e7a7fc6f344c10f1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_pytest_TestOEJSKITSpecials.test_funcarg_non_pycollectobj.assert_clscol_funcargs_a": {"doc_hash": "08dea6a6ce4e3c588e3cfe600ccc7a8989a55f6d60d2394664485224b80c1072"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestOEJSKITSpecials.test_autouse_fixture_test_wrapped_getfslineno.assert_lineno_lineno2_": {"doc_hash": "7795feef8099124e414f7459299eebff010df149b09ac6cae7c4891dc29b46d7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration_TestMockDecoration.test_unittest_mock.reprec_assertoutcome_pass": {"doc_hash": "748a9b7ebb2b36525a5387727bdd29d0fd4ab7083f515a270bd991d5a870f8b3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_fixture_TestMockDecoration.test_unittest_mock_and_fixture.reprec_assertoutcome_pass": {"doc_hash": "9bd0a42ef179e7905b66a58a91f169f8035fb83201a9b6af0b8071d3fde5ad82"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_pypi_mock_TestMockDecoration.test_unittest_mock_and_pypi_mock.reprec_assertoutcome_pass": {"doc_hash": "0178a89c808ab3954c561458f321a2d507a40ff88ff8726c28cebdec820e50f6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_TestMockDecoration.test_mock.assert_funcnames_T_t": {"doc_hash": "53f14123606618d16fa13764e6a78c7cc476bfbb7652d39a29b143e45bc3235d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_sorting_TestMockDecoration.test_mock_sorting.assert_names_test_on": {"doc_hash": "949550b005088575d078e99190cf6ca0429ff297b5c1563c9d0c16aa1adac26f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_double_patch_issue473_TestMockDecoration.test_mock_double_patch_issue473.reprec_assertoutcome_pass": {"doc_hash": "9a8a5a48699500d3d0afdbb8d3848dc07bc7306bb3745209ca723ce6fef1498e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestReRunTests_TestReRunTests.test_rerun.None_3": {"doc_hash": "ec1aec9e327f7d84e17300819f277509c5fc0c47cd194fb5f63b59023bbbe7b2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_test_pytestconfig_is_session_scoped_TestNoselikeTestAttribute.test_class_and_method.assert_not_calls": {"doc_hash": 
"6494727dd89b88f00ab2c16988786d3585e5583b49b38708929157d69b8cc4e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_unittest_class_TestNoselikeTestAttribute.test_unittest_class.assert_call_items_0_cls_": {"doc_hash": "49beb73523b760c671bdc5336a53c528e84111b17f1ad924ae902750154262f2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_class_with_nasty_getattr_TestNoselikeTestAttribute.test_class_with_nasty_getattr.assert_not_call_items": {"doc_hash": "32ecbdba546bf2d52d1cec79ff32a12efc201f1a2f3afff591763722cbbd9565"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize_TestParameterize.test_idfn_marker.res_stdout_fnmatch_lines_": {"doc_hash": "732b8a0ad8f1e5fb91e118ab7fb1250c24327529a82bfa99afcf11886aa1e217"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize.test_idfn_fixture_": {"doc_hash": "2089364fe4519f721837746bac049895278213566d5308aa03227389f8ae8e61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py__coding_utf_8__TestMetafunc.test_function_basic.assert_metafunc_cls_is_No": {"doc_hash": "10a8bd3e9db348247b1b8b54c2e79dc9244d28bb7e9450a2dfd5774f79839615"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_error_TestMetafunc.test_parametrize_bad_scope.with_pytest_raises_.metafunc_parametrize_x_": {"doc_hash": "b979d12f1a7928b19334be7f01cdc30c192c0e4cc4038bfb776fbdd537bc00db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_find_parametrized_scope_TestMetafunc.test_find_parametrized_scope.None_12": {"doc_hash": "8229a247564ee0816e5d4ca5c6a8718e1acafc4522f7947a45493ac64c4cbf8d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_and_id_TestMetafunc.test_parametrize_empty_list.assert_skip_metafunc": {"doc_hash": "f99ff041bad84e9abfb6a42fd59bb6c4f55c58841a4e258c1cceb803addd3931"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_with_userobjects_TestMetafunc.test_parametrize_with_userobjects.assert_metafunc__calls_3_": {"doc_hash": "3cd5fe58f258b3e86acc082c065d900d58c0754d7c522591c999c9932f4f14bf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idval_hypothesis_TestMetafunc.test_idval_hypothesis.escaped_encode_ascii_": {"doc_hash": "57cd5deb886b0106393486c6ce72e26338eee5774a2fd498110aabcedd21978c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_unicode_idval_TestMetafunc.test_unicode_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"doc_hash": "44d36534d86cbc02bedd71049deaff1bfdd504df54c360d2555e20bdd9d36658"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_bytes_idval_TestMetafunc.test_bytes_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"doc_hash": "b58089ba73857a5c037cc780574a7a65fad5a751a106e9adff6248b4e7dd160d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_class_or_function_idval_TestMetafunc.test_class_or_function_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"doc_hash": "740f787e84bb70cbced526d213f06545861d4524b6a95e0ef0d9255245933bc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_autoname_TestMetafunc.test_idmaker_with_bytes_regex.assert_result_foo_": {"doc_hash": 
"0a79018cf26ce600d99ea8827440df15207877e4396602453410536bd2f95602"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_native_strings_TestMetafunc.test_idmaker_native_strings.assert_result_": {"doc_hash": "ad6626476f4aa7b2b6362b8f6ee5e19f8bbfd0a7472162792da5eed1579e04c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_non_printable_characters_TestMetafunc.test_idmaker_non_printable_characters.assert_result_x00_": {"doc_hash": "f5a4a0f6b82ce9720bf0b8fd4ed900dd003e6698ee0f391f0614fef7c2ab1cd2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_manual_ids_must_be_printable_TestMetafunc.test_idmaker_enum.assert_result_Foo_on": {"doc_hash": "3377f8146c2a87ff1f2c85dc521e5489f3e907a3bd52ced5d27d2d7f6bed7f87"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_TestMetafunc.test_idmaker_idfn.assert_result_10_0_I": {"doc_hash": "6540fdf5b7ce70b0ca5d5b61a9e9f230c422060dc30f7796b01e2ac091317f33"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_unique_names_TestMetafunc.test_idmaker_idfn_unique_names.assert_result_a_a0_": {"doc_hash": "35e8ba1a37bab461aeb22386dd9ea545f0ba52357461569db5fe0061a23768d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_exception_TestMetafunc.test_parametrize_ids_exception.result_stdout_fnmatch_lin": {"doc_hash": "2312158891fd691558cd49aa150e9b56a7730fbe3af4354b6bd76228dea023ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_returns_non_string_TestMetafunc.test_idmaker_with_ids_unique_names.assert_result_a0_": {"doc_hash": "cf6ca4019d70e89996d3a7139871fa27bd546cfd53505bdc39100ab9420d999d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_TestMetafunc.test_parametrize_indirect.None_4": {"doc_hash": "dff979fb2173344b7f2b1ea5918391c391a9aed17326337c9e5ba5104d41e953"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_TestMetafunc.test_parametrize_indirect_list_empty.None_1": {"doc_hash": "5dc3caef4ad70953200fd69d604e6ee5e07fac41b71c4dc41d057ff1c6478dc4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_functional_TestMetafunc.test_parametrize_indirect_list_functional.result_stdout_fnmatch_lin": {"doc_hash": "a8a44f5144a5714e86d73616eb3bf311139059a18338a6a466a27815184cbccd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_error_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_false.result_stdout_fnmatch_lin": {"doc_hash": "73823dc7b37dd23322cae26aea52215ef0fe76ae4cbc7e1ec70f385f70221bf1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true.result_stdout_fnmatch_lin": {"doc_hash": "c86166b3a607b8ba575ab8328a4f098a1eae064572455b2ae3cf8b12dea7ae2c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string.result_stdout_fnmatch_lin": {"doc_hash": 
"41ca20dc903bde7105e66c38606540cc1ecd104971e0e9836369452092f529fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list.result_stdout_fnmatch_lin": {"doc_hash": "9e170a3157910c8263e76f75fc896818e56eeac58112983ffb74b3eb51fb43e7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_argument_not_in_indirect_list_TestMetafunc.test_parametrize_argument_not_in_indirect_list.result_stdout_fnmatch_lin": {"doc_hash": "18b895f083484adec48aefed37d11be6c4ac0fb0d99537a0dfca2aea97621471"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument.result_stdout_fnmatch_lin": {"doc_hash": "00d6f3f31b8ea25e4ed0c3e8fd2b88a5c6336000a20160b486f1e509dfc41d04"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_functional_TestMetafunc.test_parametrize_functional.result_stdout_fnmatch_lin": {"doc_hash": "fa5d8f4d2aca1ce2f58e7f40f9fe6052c4925346a9d86b4f9841d613a344be23"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_onearg_TestMetafunc.test_parametrize_onearg_indirect.None_3": {"doc_hash": "0ec10b3afc9c42cbf31d3d2502516b5d4850a0694102c29b61979b824e26eb6f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_twoargs_TestMetafunc.test_parametrize_twoargs.None_4": {"doc_hash": "08eb2027ff0d37ff82cfdae152e5e9332e02863458b2d96d4c8092ab335b542a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_multiple_times_TestMetafunc.test_parametrize_CSV.reprec_assertoutcome_pass": {"doc_hash": "78f7b511803c2a7c0a2a90ede90d670d972957157db59e97cb4bfb6c14d1d80d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_class_scenarios_TestMetafunc.test_parametrize_class_scenarios.result_stdout_fnmatch_lin": {"doc_hash": "6071d6dd5636292fe80162f5fde06a58bde8c96ff465bbfdaa464dc94f6fdad8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_format_args_TestMetafunc.test_format_args.None_3": {"doc_hash": "0b28f3035ab6c8b5560c9492182635e84ba73ce699b11c02719f9d32ab44b918"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional_TestMetafuncFunctional.test_attributes.result_assert_outcomes_pa": {"doc_hash": "8925c1a323af14fef013debca9299b2e9bbed38ef1f8fb282278a9e4f86a11ac"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_TestMetafuncFunctional.test_two_functions.result_stdout_fnmatch_lin": {"doc_hash": "fd11a16a91074f6d1fcdf587e228fe51f3be106f3228f60d217bde82209582d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_noself_in_method_TestMetafuncFunctional.test_generate_tests_in_class.result_stdout_fnmatch_lin": {"doc_hash": "e68968e5b47d0ca1480cc6b32220df33ac5c6a1f234653a4480e913dcbe4baf6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_not_same_instance_TestMetafuncFunctional.test_two_functions_not_same_instance.result_stdout_fnmatch_lin": {"doc_hash": 
"c3dcb491c20fc0d64f3d0d040d99ffb797b15852384d1584aaac59a71a9a39e7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests.result_assert_outcomes_pa": {"doc_hash": "82fd8d6aae284eda8acdd4f3d6190ad83936b3549080ad3c5217e8b7d2b9a323"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_functional2_TestMetafuncFunctional.test_parametrize_functional2.result_stdout_fnmatch_lin": {"doc_hash": "baae6d7fccc2ffb262226f4106485ba13a8a6d0a4dc637445ab23bb59726ef0e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue.result_stdout_fnmatch_lin": {"doc_hash": "28b170edf9c72d2b559b585d0b0afc6806a763b1e54878ffea7f795864cbc5c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_on_setup_arg_TestMetafuncFunctional.test_parametrize_on_setup_arg.result_stdout_fnmatch_lin": {"doc_hash": "0bbb7abd0269e7fefa53ca79337827d88e2abe5200585f5dd167497bfeae8a4f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_ids_TestMetafuncFunctional.test_parametrize_with_ids.result_stdout_fnmatch_lin": {"doc_hash": "c97422aeb1e54e053632b08a367f764af5b280b18af3219f6e22b3f592bc11d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_without_ids_TestMetafuncFunctional.test_parametrize_without_ids.result_stdout_fnmatch_lin": {"doc_hash": "d0ae1fe2574f3a3d009d78d8d7cd3677e4e7ac9990dd1b27359c001fab23bd85"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_None_in_ids_TestMetafuncFunctional.test_parametrize_with_None_in_ids.result_stdout_fnmatch_lin": {"doc_hash": "d1c1e42eaa8a533850f57358430c088e3c8994b878e9b233e671adf46145a10d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_fixture_parametrized_empty_ids_TestMetafuncFunctional.test_parametrized_empty_ids.result_stdout_fnmatch_lin": {"doc_hash": "6c202fb962328ebe58e5280921439bc480987d48f29421e00dbb5ed07bdd02f6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrized_ids_invalid_type_TestMetafuncFunctional.test_parametrized_ids_invalid_type.result_stdout_fnmatch_lin": {"doc_hash": "900f2611843a8fc6884a03dd53facbced5d20a120c6bf21fe05e890070f0d180"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names.result_stdout_fnmatch_lin": {"doc_hash": "f93cc1c5610653f5b415dfaa4296eea9399f72cc5fb586db1c07cc6c55a79694"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_scope_overrides_TestMetafuncFunctional.test_parametrize_scope_overrides.reprec_assertoutcome_pass": {"doc_hash": "b2f166cfcd00d9e836a4df5058d33a1bf668d98d9262a7f07537a28061f88cf9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_issue323_TestMetafuncFunctional.test_usefixtures_seen_in_generate_tests.reprec_assert_outcomes_pa": {"doc_hash": 
"809cdf3a3f4e205404b3764240dbf56ac79371a58defdbeb7ed6edf36c31fee7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir.result_assert_outcomes_pa": {"doc_hash": "30e0621df24c1be37787f9e6cee847ee99edb06be23f962f32594478524578e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_same_function_names_issue403_TestMetafuncFunctional.test_parametrize_misspelling.result_stdout_fnmatch_lin": {"doc_hash": "c7f21284b11ffaef4a4127e79d74cd76f1d0b14797f949f85c460dc181e79c0b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto_TestMetafuncFunctionalAuto.test_parametrize_auto_scope.result_stdout_fnmatch_lin": {"doc_hash": "4e656ac5e5539c5b020d7fb6edc861c3832dfbf401df2d2a66170682c02100e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect.result_stdout_fnmatch_lin": {"doc_hash": "85f5aea6dd719d4c303e48774fc550ddbca2efab77f267f633baf0532b61f79b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture.result_stdout_fnmatch_lin": {"doc_hash": "4549d705b14602c83e4108fcf398f63942ff6abf6bc2876fddd198e066d55dce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_all_indirects_TestMetafuncFunctionalAuto.test_parametrize_all_indirects.result_stdout_fnmatch_lin": {"doc_hash": "3fcd33fff423edac7547759cfb5d429c85b13c5f9da5e1fe0d2889c0b6aa12e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope.assert_class_fix_setup_": {"doc_hash": "25e610b14c2d08cdc8c53a99f68f3d5abb8983ad19ab042793a91867625fd2f8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_issue634_TestMetafuncFunctionalAuto.test_parametrize_issue634.None_1": {"doc_hash": "9f0ee7ff04adf5f5683bec614cc5b18c337bb1895dbffd6f10b08aac4708126a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization_TestMarkersWithParametrization.test_simple_mark.None_3": {"doc_hash": "b3f8710bcf73d52f2f5629b03a4ba10674bf7887a21369d8ed755a8a2e897472"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_select_based_on_mark_TestMarkersWithParametrization.test_select_based_on_mark.assert_len_fail_0": {"doc_hash": "cd93f0301adf9767052b0ea6ad319a83f4fe76ae996bb4955b76af7714dffa84"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_nested_marks_TestMarkersWithParametrization.test_nested_marks.for_mark_in_foo_bar_.None_2": {"doc_hash": "a36a9799114b848714ecb16664530b4ee436fd1ae21cce31e85202b6dd6636c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_TestMarkersWithParametrization.test_simple_xfail.reprec_assertoutcome_pass": {"doc_hash": "16c093a5f1860a9eda84687ff9907995807c4aedeee5b8e318edf05f76ccd387"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_single_argname_TestMarkersWithParametrization.test_simple_xfail_single_argname.reprec_assertoutcome_pass": {"doc_hash": "0dffd4ac7c7b771454e1375649b3594a1155ddf27ae9852a5f9bec92c3df37f6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_TestMarkersWithParametrization.test_xfail_with_arg.reprec_assertoutcome_pass": {"doc_hash": "5e6dfd3a2d79d4bf9c66dc73d00095e3d7ccdf0fd0bbfb9e0a64b8b22890d9c8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_kwarg_TestMarkersWithParametrization.test_xfail_with_kwarg.reprec_assertoutcome_pass": {"doc_hash": "4433f785db94166da5f0effa3152000bf9afe11feaa56e867015c053c2f56d93"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg.reprec_assertoutcome_pass": {"doc_hash": "a1ef7e0184ceb890b8e2f8ce23ca938d7490e71495685feaf5fdc0503affa11f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_passing_is_xpass_TestMarkersWithParametrization.test_xfail_passing_is_xpass.reprec_assertoutcome_pass": {"doc_hash": "c8667584e5959531037f85c658ed3e283b91534e604f6a516d18e91cf0ba470e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_called_in_generate_tests_TestMarkersWithParametrization.test_parametrize_ID_generation_string_int_works.reprec_assertoutcome_pass": {"doc_hash": "d4e07ce085c801dd636f5c2564d4c36c95bf69a0a05fd32720da86714bdfa964"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_marked_value_TestMarkersWithParametrization.test_parametrize_marked_value.reprec_assertoutcome_pass": {"doc_hash": "3172a0d50d3517db756f0ba91e52cffa5e0af60dec8cccb13b6101b1995a9395"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_TestMarkersWithParametrization.test_pytest_make_parametrize_id.result_stdout_fnmatch_lin": {"doc_hash": "454019a6cbc624aee19ba060b9040e9dee1f01424a939972c08a72c3bea7fc65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_with_argname_": {"doc_hash": "65b3cb263bb7cedefe780a6fbfec63ea14978e41b7dafb6a9c8b0020bbbdf971"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_sys_TestRaises.test_raises_repr_inflight.with_pytest_raises_E_as_.raise_E_": {"doc_hash": "ba1522957e4900507b0fdaa3d478dc5738065341070d1015f5e968d6b73ed881"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_as_contextmanager_TestRaises.test_raises_as_contextmanager.result_stdout_fnmatch_lin": {"doc_hash": "58a3320f389db4bf37ccceb0b264d3be7cfa122608e69da1eb5ad3a37fa35a25"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_TestRaises.test_does_not_raise.result_stdout_fnmatch_lin": {"doc_hash": "078ae3ffa057aeecc4c48a052c6f07351f9d25c6522d798b00a0bc2f86ebde88"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_does_raise_TestRaises.test_does_not_raise_does_raise.result_stdout_fnmatch_lin": {"doc_hash": 
"f2c45eb8188657e3b563dc14cd346861698ea2455e3be545e89bd582596b279b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_noclass_TestRaises.test_custom_raise_message.try_.else_.assert_False_Expected_p": {"doc_hash": "ead60c9225c747adc57bf2db1a80ec2938e59d5fa1b9c26ba027082f574b3380"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_cyclic_reference_TestRaises.test_raises_cyclic_reference.for_o_in_gc_get_objects_.assert_type_o_is_not_T": {"doc_hash": "89a48be02a2d3ffa56a74be9d8790fcca20415bfb242ab60721ab8826829e37a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_TestRaises.test_raises_match.with_pytest_raises_Assert.with_pytest_raises_ValueE.int_asdf_base_10_": {"doc_hash": "8bed9a7690f83769bdaa12e7fb5a370e05fdb5176d71b55b9a1eda38a9ab3432"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_wrong_type_TestRaises.test_raises_exception_looks_iterable.with_pytest_raises_.pytest_raises_ClassLooksI": {"doc_hash": "17540530a45c9ea4fe4a8936817f8e33322ac046c6a3c3e7884e297e93c0e646"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_with_raising_dunder_class_": {"doc_hash": "ad35fde5ff1dc46a81c5f615c0232a322114900c5aa30c008952ebc12e6b9c13"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_pytest_test_show_only_active_fixtures.assert__arg0_not_in_res": {"doc_hash": "b7c16c0f0cbf60ecbbabdb71a9f6c1b7a6b52a3364f36787f849ebd0826b42a5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_different_scopes_test_show_different_scopes.result_stdout_fnmatch_lin": {"doc_hash": "4e62c31584ea15ccde29ad7440d38e730b369b22bbffcce0be4b20af7143528c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_nested_fixtures_test_show_nested_fixtures.result_stdout_fnmatch_lin": {"doc_hash": "703380457ff6e9de57bd32bc640e52406689130fba25c28e682ce225b10f0dc7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_autouse_test_show_fixtures_with_autouse.result_stdout_fnmatch_lin": {"doc_hash": "6ef2ea56a057f3f141716e11a7d8698cabad210c61a454952dc10e286cdff83a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameters_test_show_fixtures_with_parameters.result_stdout_fnmatch_lin": {"doc_hash": "f56734cc5ffab5305c81497e43c6e1ba7048f4a8537591f7d1ea4883c7066a58"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_test_show_fixtures_with_parameter_ids.result_stdout_fnmatch_lin": {"doc_hash": "bd9739c407a295f88ee4d011c61bbca367fc348928de6885058891455073ddc6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_function_test_show_fixtures_with_parameter_ids_function.result_stdout_fnmatch_lin": {"doc_hash": "cf6bd809eb91b8ba24a3a579abdfd46a36244ec7b21a7b07146d891a409ddf45"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_dynamic_fixture_request_test_dynamic_fixture_request.result_stdout_fnmatch_lin": {"doc_hash": "21665d1f76be99a0e03b76265574b3a0cc87a9ea5cdeed12db026d0ee83479c9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_capturing_test_capturing.result_stdout_fnmatch_lin": {"doc_hash": "4c3b5e80b938e56cdcc6b9d2717b431100333fa4a0579b22c0b94ce14998d937"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_and_execute_test_": {"doc_hash": "89fca6bd4ee80a4751781d64b33620f9ea07b67dfdbea262b68e0ce366aa588e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_plan.py_test_show_fixtures_and_test_": {"doc_hash": "b065f6224dd1038d6046de433e3f556ae705828013b9cf8203528c7836150c11"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py__coding_utf_8__test_fixtures_in_module.assert__arg0_not_in_res": {"doc_hash": "2bcf3b197e7c28bd73b76e00c172c710358679d0d684aceac31851548a9583c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_fixtures_in_conftest_test_fixtures_in_conftest.result_stdout_fnmatch_lin": {"doc_hash": "e6878be6b50c1060f5eeb9d4fe6aacf70296ff1f9e152a75c8872125a6efde06"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_should_show_fixtures_used_by_test_test_should_show_fixtures_used_by_test.result_stdout_fnmatch_lin": {"doc_hash": "83aafaeed9d5d09e450e3f652dc4b26c9ee9ef6495c3053cbe61fbcd01bb4efd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_verbose_include_private_fixtures_and_loc_test_verbose_include_private_fixtures_and_loc.result_stdout_fnmatch_lin": {"doc_hash": "9aa7947661c2549f4547d25b4e059ab9c1938e0f1aeaf19f3734451789ed0c88"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_doctest_items_": {"doc_hash": "b600011922cdb592e8650c6010bce27a0b1d613e2675f90e5d43e684496e73ca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_from___future___import_ab_equal_with_bash.return.retval": {"doc_hash": "d7fcec8d0934877d39e3a90b3373812c430e1391997f16df82d661b60a4ced3d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py__copied_from_argcomplete__wrapcall.try_.except_subprocess_CalledP.return._": {"doc_hash": "df1a04b616e84a3f359c17ef5e954ab42d7e9fac5e954cd22841d21f47fbe20f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_FilesCompleter_FilesCompleter.__call__.return.completion": {"doc_hash": "98311aea05ec2d7cbf6f94c53479c2fce6e6ab664401a60507ef5e24d1575ec3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_TestArgComplete_": {"doc_hash": "b7acad63eda5dce8b7a84811ad36bcd62cf23f53d69fab62b2250bd2b934338e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py__coding_utf_8__mock_config.return.Config_": {"doc_hash": "5502d17123d3ada7e8f38fe0b04fac272cb7d0ddefa025b253310df78b852dda"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation_TestImportHookInstallation.test_conftest_assertion_rewrite.result_stdout_fnmatch_lin": {"doc_hash": "13fbd2ab957ca803fba107cf6316de2e8aef175a6318b151dd691ce5c8f65833"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin.result_stdout_fnmatch_lin": {"doc_hash": "f4f019af270592b144cd463450674ecb5c4b93cadf0cc909eacf1c7c707fd529"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_TestImportHookInstallation.test_pytest_plugins_rewrite.result_stdout_fnmatch_lin": {"doc_hash": "bc925f7f9b003fcc3d5fa12154ad4a6004a43d46316c42816f81eb93534624a2"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names.assert_result_ret_0": {"doc_hash": "ec71bfa3c7cde8dae4883c15f7f4ada6e0bb5bcd5ddaddea9c18b46e58151050"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly.assert_result_ret_0": {"doc_hash": "9bbf1e264724f15d850e013efa91aebe533e88686146cbbce23b795f80870a84"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_installed_plugin_rewrite_TestImportHookInstallation.test_installed_plugin_rewrite.result_stdout_fnmatch_lin": {"doc_hash": "5a5b7617ee6432f07f48546d9803c33129ab709a729e555ecaedb22e70f94428"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_ast_TestImportHookInstallation.test_register_assert_rewrite_checks_types.pytest_register_assert_re": {"doc_hash": "8567ac4fd0cabc40a282fec8cfb882b3959d5aa084e1567c123a36f67294e85b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestBinReprIntegration_callequal.return.plugin_pytest_assertrepr_": {"doc_hash": "abb423e4a12b7705b2d11674cc3ec0467cd39f3c78cb2715bbc0e3464d02db7a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_TestAssert_reprcompare.test_list.assert_len_expl_1": {"doc_hash": "e08a9148ab79a3041df9807499f29b55132a748edb053ea0e808cd8d4bc9886b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_iterable_full_diff_TestAssert_reprcompare.test_iterable_full_diff.assert_expl_endswith_text": {"doc_hash": "12b985f8981017b7427ca6d5c46b0dd23b85a0443ab60daa88161843053724f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_different_lengths_TestAssert_reprcompare.test_dict_omitting.for_line_in_lines_1_.assert_b_not_in_line": {"doc_hash": "bea708956775bcf1dbe27b985d778905d6ba312920ad7a11be6918f2e96496ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_omitting_with_verbosity_1_TestAssert_reprcompare.test_dict_omitting_with_verbosity_2.assert_lines_2_b_": {"doc_hash": "ca7ce963799052b093d10b8feb893fa60f130c73d2b6e8128a39550c6dc3912d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_different_items_TestAssert_reprcompare.test_dict_different_items.None_1": {"doc_hash": "b7c11e9966c8645949704f382ace303922ed27234e820a9133fa8f6196a44ef2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_sequence_different_items_TestAssert_reprcompare.test_sequence_different_items.None_1": {"doc_hash": "8eb6d9510bf9f9d73f7ffb7c45106ccecb7b91d98915c79eedb0e02c5e310004"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_set_TestAssert_reprcompare.test_Sequence.assert_len_expl_1": {"doc_hash": "08a06bbbab1f2838fc743f6572624c607116e42f01675d312c1fcde830f81e75"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_tuples_TestAssert_reprcompare.test_repr_verbose.None_5": {"doc_hash": "22b5bf787724cef20c3062538732acd4f5b4b63beeaf1ed2e748bd00041d5381"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_bad_repr_TestAssert_reprcompare.test_mojibake.assert_msg": {"doc_hash": "ba2b3d3d5e78a5ad74ef595ddda76878e93a856208559c313b2f1b461a1b9176"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass_TestAssert_reprcompare_dataclass.test_dataclasses.result_stdout_fnmatch_lin": {"doc_hash": "f1bd1c7f0ff52c61c38479df46b2c67499f29dfe2166cd039c626dfe2fa099ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_verbose_TestAssert_reprcompare_dataclass.test_dataclasses_verbose.result_stdout_fnmatch_lin": {"doc_hash": "b72a6125d143bbb23fe896bb782950fdeac3625e0f1a2bb9945c1dba74a1ece1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_with_attribute_comparison_off_TestAssert_reprcompare_dataclass.test_comparing_two_different_data_classes.result_assert_outcomes_fa": {"doc_hash": "34baba882c5dae47ae4e283d852a53b2b70e546be2bff6316b8a9e46ae4fb6a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass_TestAssert_reprcompare_attrsclass.test_attrs_verbose.assert_lines_2_fie": {"doc_hash": "512bb1040672d5a75137276a0314ac62c8337eeaa1463426eb9d5e6ed466b5bd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off_TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.assert_lines_is_None": {"doc_hash": "d637a1fdfa85e046a8ba7889dbb8a2907b560a2c7d243fa894ebb53b17863bf8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation_TestFormatExplanation.test_fmt_newline_escaped.assert_util_format_explan": {"doc_hash": "7283afd898cbdab6ba0f3ab999e76c98216a3e63d6b06df2133a2d8a70fb5473"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_newline_before_where_TestFormatExplanation.test_fmt_newline_before_where.assert_util_format_explan": {"doc_hash": "e4be62cb0f6d93919511581837b7286c910d11e6af6a2a6578e0460e646a82f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_multi_newline_before_where_TestFormatExplanation.test_fmt_multi_newline_before_where.assert_util_format_explan": {"doc_hash": "74898ae782ce298d5a2ed07968c587c2d04b21a05b165cead2cd72c8f893c310"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation_TestTruncateExplanation.test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars.assert_result_expl": {"doc_hash": "c557482fb25ce41ceafecf08d7cde202780bb573f23f695392d495584dc58851"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings.assert_last_line_before_t": {"doc_hash": "09fc94623c93f1bbcb28d59a9c6622a4eb4f959165b9b90c82729b24b1e13328"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars.assert_last_line_before_t": {"doc_hash": "b24add22911847658447ffb86fdc61dc3f415fdaff7558cfadc812da6181f592"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars.assert_last_line_before_t": {"doc_hash": "01f916b4455d76ffef89f043a2829ea27fcb4b8ecb8e55d638e0d6b954cfd871"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars.assert_last_line_before_t": {"doc_hash": "d1dfd8ef4c6442ec65ddf9a22014e936caaafbce590514198c4a59a1c6457dbf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars.assert_last_line_before_t": {"doc_hash": "d56f163fc8b27825fd74dfbfea7d126f7d626316b71d95365efea46a3334c86c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_full_output_truncated_TestTruncateExplanation.test_full_output_truncated.None_5": {"doc_hash": "46463d837267df78c743a3d2e66a1ba3d805c4ac4574940c4c62a28817afbc74"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_python25_compile_issue257_test_pytest_assertrepr_compare_integration.result_stdout_fnmatch_lin": {"doc_hash": "38a8d19936439f7e5d8ad6d8fed2c4250d5c822765591298bf24c2a6b56aec0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_sequence_comparison_uses_repr_test_sequence_comparison_uses_repr.result_stdout_fnmatch_lin": {"doc_hash": "9de841bb7d0be77c907b7e85af2aa5781d3057c7b6c67fa3527ef954834429e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertrepr_loaded_per_dir_test_assertrepr_loaded_per_dir.result_stdout_fnmatch_lin": {"doc_hash": "976bbcc8bb70428dbb03e3ebe13b76197e651726f9b20ee46ea234cfde0275e2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertion_options_test_triple_quoted_string_issue113.assert_SyntaxError_not_": {"doc_hash": "3130c54c692a1250f0086e931cf618af20fc9b60b53b2a038ef7d372bee35b1d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_traceback_failure_test_traceback_failure.None_1": {"doc_hash": "c20eae03c766567a6e213a29aba547f2e946e4c5ec5106ac44f2bf0be6438333"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_exception_handling_no_traceback_test_exception_handling_no_traceback.result_stdout_fnmatch_lin": {"doc_hash": "89f7d811f58041a3a534b58f35155481e86b343eb24d17d98fb3d442b096c977"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_warn_missing_test_warn_missing.None_2": {"doc_hash": "401f3af0f78613e935bb1c1064802da3439bf43fb0170e6ec068c0764f896309"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_recursion_source_decode_test_AssertionError_message.result_stdout_fnmatch_lin": {"doc_hash": "937f06eebc1a90ca2dd0cb6117b94e271a5757d45bad869d2776956e6225ec48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_set_with_unsortable_elements_test_set_with_unsortable_elements.assert_n_join_expl_": {"doc_hash": "2008bee03c32cf7e9f6c8362a7e22533194c65458aa6f5aff99f358f68c09986"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_diff_newline_at_end_test_assert_tuple_warning.assert_msg_not_in_result_": {"doc_hash": 
"27c0b73c49d6c5f732fdca1209a176ac85e473f687282f9acc5ce319a1d9abd9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assert_indirect_tuple_no_warning_": {"doc_hash": "62154ad0148a605ee62d886168ebfec8bd2676faa82e54ee8d97f51444e971b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py__coding_utf_8__rewrite.return.tree": {"doc_hash": "0431762e38b2cd95fa7d10307094cd39ce03d029ead815a9bc829f4ebb517314"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_getmsg_getmsg.try_.else_.if_not_must_pass_.pytest_fail_function_did": {"doc_hash": "4c1739d103c98fe28c4b9bba43e897298bc87328117fa7ab694b5d134038e373"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite_TestAssertionRewrite.test_place_initial_imports.None_9": {"doc_hash": "e443e98fc3f9b33aa720aef47570a2532af0c5da74f9f784a7d390189794d258"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_TestAssertionRewrite.test_dont_rewrite_plugin.assert_warnings_not_in_": {"doc_hash": "c972eedca26def4c92319cc9f06414485b2e0010a05fd67c6ca4878d66375331"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_name_TestAssertionRewrite.test_name.None_1.else_.assert_msg_assert_cl": {"doc_hash": "b31c1c53306d11c3916492efbb738961485b5b9bf3cdfe0172b30fa90cbfa909"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails.if_request_config_getopti.else_.assert_message_": {"doc_hash": "4a623b1aaec14a467963544ea847d6d874ea23fd1ff4686163d9bf1bba3e09b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_already_has_message_TestAssertionRewrite.test_assertion_messages_bytes.result_stdout_fnmatch_lin": {"doc_hash": "065701b98c7aceeed9a23e416730884a93e1e0ea60c258c3a2e377b0ab110e61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_boolop_TestAssertionRewrite.test_boolop.None_2": {"doc_hash": "420b9cd5710dcf6c6be9a516a0f791ce75e12b8a4afe7f7096eadb2ce8285a2d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_short_circuit_evaluation_TestAssertionRewrite.test_unary_op.None_3": {"doc_hash": "6a9fbd279ec17cc6659f1c5957a5f0a18a066c3bdba785cc2787d545426d97c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_binary_op_TestAssertionRewrite.test_boolop_percent.None_1": {"doc_hash": "31df92e162934bbcda2a449a1aec0aeea3b9893e8c970172f44830fddd39ada6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_at_operator_issue1290_TestAssertionRewrite.test_starred_with_side_effect.testdir_runpytest_asser": {"doc_hash": "e0cdc1caba58bede5eba746fedc8a590988ae072001cc1e23b1640727a6500b2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_call_TestAssertionRewrite.test_call.None_6": {"doc_hash": "10be3b07f1b47a02bd676955dfed4ff4a2d8be405283432ba311ea3e8aeb7df3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_attribute_TestAssertionRewrite.test_comparisons.None_1": {"doc_hash": "04d94d4514950625747447a281cd825f397dd18fe053633ce80e4ce03a087761"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_len_TestAssertionRewrite.test_len.if_request_config_getopti.else_.assert_msg_assert_10_": {"doc_hash": "08079eb1a93b68df1f89b097c877a51c4a58b7ab4bd7b7894cd65eecb9be6c33"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_reprcompare_TestAssertionRewrite.test_custom_reprcompare.None_1": {"doc_hash": "6fe437e6dbe139a437189243980eacfc88d5ef490aea5d0d49f0a8a3ff49b489"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_raising_nonzero_in_comparison_TestAssertionRewrite.test_formatchar.assert_getmsg_f_startswi": {"doc_hash": "7cc557809c34dba78043fcf427b6012cb00173ad62ed485543b8efee10f69cc3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_repr_TestAssertionRewrite.test_custom_repr_non_ascii.assert_UnicodeEncodeErro": {"doc_hash": "8159e1163260a074a3fdba2e517b05a2235f1298d9c5a5be3de0d7518b12981b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport_TestRewriteOnImport.test_pycache_is_readonly.try_.finally_.cache_chmod_old_mode_": {"doc_hash": "1df8b598f64533d88d4af46679ea5a5ba5e9ff0e694655d23e20ca1e0d2f5777"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_zipfile_TestRewriteOnImport.test_zipfile.assert_testdir_runpytest_": {"doc_hash": "7b948e5717308867030bf05af9431a81fa23fea24ee8a8576515a5208c8e65a3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_readonly_TestRewriteOnImport.test_dont_write_bytecode.assert_testdir_runpytest_": {"doc_hash": "76b9e1ec3a32f874d4855ffc05bd2663d34ae111d4ca2314b3b24725e955d5e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_orphaned_pyc_file_TestRewriteOnImport.test_orphaned_pyc_file.assert_testdir_runpytest_": {"doc_hash": "1104c785b579861276066a5468963cd000655f0b093380d776d54ba4db76b7d7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_pyc_vs_pyo_TestRewriteOnImport.test_pyc_vs_pyo.assert_tagged_pyc_in": {"doc_hash": "2851677ec187b02437e3ded8c5cdc2903df462f68ba98f140642d054104cd0ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_package_TestRewriteOnImport.test_rewrite_module_imported_from_conftest.assert_testdir_runpytest_": {"doc_hash": "92151a921e6a7b1dc64233db4c59e53bc2914468cd73f0340adf47b5a4078578"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_remember_rewritten_modules_TestRewriteOnImport.test_remember_rewritten_modules.assert_warnings_": {"doc_hash": "8f89071579742ce2a530956c3e0fb8bc2cc69c097b3b63d8284d1f43ff8a4afe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins.assert_pytest_warning_su": {"doc_hash": "d2a21900c301759c73dd31e9e6a0ae0558f612c9bf9dfc61c1deb09c1d54306d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var.assert_pytest_warning_su": {"doc_hash": "40c9e57bd5dcac5dd2b67f5931513a1939904eeacad443a8909221fab8b159c2"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_future_imports_TestRewriteOnImport.test_rewrite_future_imports.assert_result_ret_0": {"doc_hash": "e6f423a7715b746b1480b2a241992f876480d996db70796677f14ae60f298405"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails_TestAssertionRewriteHookDetails.test_loader_is_package_true_for_package.result_stdout_fnmatch_lin": {"doc_hash": "bea21a4745cc9206ac5a01e4d972120625dbe6120f50d314d8aeab6207806f8c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_assume_ascii_TestAssertionRewriteHookDetails.test_assume_ascii.assert_SyntaxError_Non_": {"doc_hash": "c5f1b554067747fdb440b248701a9af7d6b37aa4a9b913113efdec838454bc7b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_detect_coding_cookie_TestAssertionRewriteHookDetails.test_sys_meta_path_munged.assert_testdir_runpytest_": {"doc_hash": "8629208909709209f44206d55d507d956fdcb628f9ad444b037c2a78957197a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_write_pyc_TestAssertionRewriteHookDetails.test_write_pyc.assert_not__write_pyc_sta": {"doc_hash": "267ac02f46c0401ddd53239f8f58f1aea84758fec2ea291958c4e5f4a9c4f9c6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_resources_provider_for_loader_TestAssertionRewriteHookDetails.test_resources_provider_for_loader.result_assert_outcomes_pa": {"doc_hash": "61f5d77b912da4ff8e1443d379bea08e43156a8abaca4122d985723d66080e9b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_read_pyc_TestAssertionRewriteHookDetails.test_read_pyc._no_error": {"doc_hash": "02e513700ed4dc6b2b7029d9d10127f7b1abe5143d39844a4d858744d96817a7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_is_same_TestAssertionRewriteHookDetails.test_reload_is_same.result_stdout_fnmatch_lin": {"doc_hash": "2977dd46d79ee5aed775ddeaeda64be73a685b475df9b2c4f8eba40de24b2bc0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_reloads_TestAssertionRewriteHookDetails.test_reload_reloads.result_stdout_fnmatch_lin": {"doc_hash": "f49150f34210df1635e632cc64af5bedb6f7e26891dc5e048546cebae715882d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_get_data_support_TestAssertionRewriteHookDetails.test_get_data_support.result_stdout_fnmatch_lin": {"doc_hash": "b8c92cbf82f54a2e078003f617e5b164b9152de7380944e8c1353e5fb1f5a8a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_issue731_test_issue731.assert_unbalanced_braces": {"doc_hash": "75b3e4287aca3f5a57c69de12edeca5b6577008b0ba2af1fc99cb48ce5bccd7b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue925_TestIssue925.test_many_brackets.result_stdout_fnmatch_lin": {"doc_hash": "241b37b32e3105ec372c6297fe0d5d130ab6877030cdf1f964e6613be4e96695"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue2121_TestIssue2121.test_rewrite_python_files_contain_subdirs.result_stdout_fnmatch_lin": {"doc_hash": "27f91716d389bf5849e73ec7eb7b1a39d85a34b86a0da0fc3b330c515f12400f"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_source_mtime_long_long_test_source_mtime_long_long.assert_result_ret_0": {"doc_hash": "f4b44579a69a8d37a8fff776b1740255b305e2d2d37810966c37bca8f70837d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_rewrite_infinite_recursion_test_rewrite_infinite_recursion.assert_len_write_pyc_call": {"doc_hash": "25a2ecff0ba20dcc59e8974b5d62f6f261967b982e0195a1769245cc990dc3ca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestEarlyRewriteBailout_TestEarlyRewriteBailout.hook.return.hook": {"doc_hash": "5f4d9ec575b9f630762e645d84a4794d72d303ca25d89cdb98f0766d5e4160cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestEarlyRewriteBailout.test_basic_TestEarlyRewriteBailout.test_basic.None_7": {"doc_hash": "b6946a145dbf14c795b400c8326a1097e154082cacbe330d5ac78be4ab3e8f44"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestEarlyRewriteBailout.test_pattern_contains_subdirectories_TestEarlyRewriteBailout.test_pattern_contains_subdirectories.assert_self_find_module_c": {"doc_hash": "9cf3448635d50c9ce20feb6642e16dd36fff1866cdf586773603bd59a996ee94"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestEarlyRewriteBailout.test_cwd_changed_": {"doc_hash": "489a085f7d36db2310baf3603602d2c390f06982baf86ee055bc849ed08f8182"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_from___future___import_ab_TestNewAPI.test_cache_writefail_permissions.cache_set_test_broken_": {"doc_hash": "c8ff2e9631f141ac86083d8ca36989d86575e1ab96fa278ca2ae7133078cb73d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_cache_failure_warns_TestNewAPI.test_cache_failure_warns.result_stdout_fnmatch_lin": {"doc_hash": "a71853b5186014280dc4be07ac7ce32d00a9facaad99c1826e8b085697daa046"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_config_cache_TestNewAPI.test_config_cache.result_stdout_fnmatch_lin": {"doc_hash": "3ea653c737446aee56d404bc0e2012c737e4491c1f0474a7b4d90621ee20e305"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_cachefuncarg_TestNewAPI.test_cachefuncarg.result_stdout_fnmatch_lin": {"doc_hash": "7abf89c865c13128d626b445fbadd361e77cce41b499001dff1427fd99a3eafd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_custom_rel_cache_dir_TestNewAPI.test_custom_rel_cache_dir.assert_testdir_tmpdir_joi": {"doc_hash": "2c9139ba722c1dda51985c1f5c885c97f9a7b6bd61b2c4e641c2e2c5a29d6cce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_custom_abs_cache_dir_TestNewAPI.test_custom_abs_cache_dir.assert_py_path_local_abs_": {"doc_hash": "3aea977a15513c31a8ae09742e6d60771b874694b178eb5578c8eec5e2188202"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewAPI.test_custom_cache_dir_with_env_var_TestNewAPI.test_custom_cache_dir_with_env_var.assert_testdir_tmpdir_joi": {"doc_hash": "4152e71ff1b2e4ac8082ec957bb3fd642534e710812394379f83ce51f8cb55d2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_test_cache_reportheader_test_cache_reportheader.result_stdout_fnmatch_lin": {"doc_hash": "ba8c5df5b7cf043aab58fee93f788c0177a61218081881204b2559a708863d6e"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_test_cache_reportheader_external_abspath_test_cache_reportheader_external_abspath.result_stdout_fnmatch_lin": {"doc_hash": "26d0d920056eb5e460fe4c47c210bf4cebee670116fb169bad48226f93e94e4d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_test_cache_show_test_cache_show.None_5": {"doc_hash": "fd9ad470aa30f32fb01ba6ae1b65c2fec80bbb492bdf1b5123499c70915fb1a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed_TestLastFailed.test_lastfailed_usecase.None_6": {"doc_hash": "366080f576ff1cbb7de70f8c4431574a787f554bcb1739b82c4fe1f100a6b4a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_failedfirst_order_TestLastFailed.test_failedfirst_order.None_3": {"doc_hash": "3b90937ec67e42519f6c2ab806de4c8c057d8094001146ac843ee9516ea8215b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_failedfirst_order_TestLastFailed.test_lastfailed_failedfirst_order.assert_test_a_py_not_in": {"doc_hash": "572d93c4b6cc474457a3d7a7caf9c9300f6be56eac4c1abe7835cfd1896f6901"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_difference_invocations_TestLastFailed.test_lastfailed_difference_invocations.None_6": {"doc_hash": "cc771e2ceea07e5c9fa207594f587d5a6d07cc912e996aae4d7ee2c2216a0ae4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_usecase_splice_TestLastFailed.test_lastfailed_usecase_splice.None_5": {"doc_hash": "996e19c32f390d2fc1cf6a4f8e826e1241eac6afbd147b3655c392035c537411"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_xpass_TestLastFailed.test_non_serializable_parametrize.result_stdout_fnmatch_lin": {"doc_hash": "d6e7c06cd813652cd4642f87b1370f478180f35b25381544fb1f6bd2f0644f01"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_terminal_report_lastfailed_TestLastFailed.test_terminal_report_lastfailed.None_4": {"doc_hash": "fd13718a8e60fec6b9ce6e9d905620c1425f8bb7b25ed81ff5a8563c6905a913"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_terminal_report_failedfirst_TestLastFailed.test_terminal_report_failedfirst.None_2": {"doc_hash": "c010260468e89d6e9d7d5236b62a41314ac7a68f01f769db62d0790779ec87c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_collectfailure_TestLastFailed.test_lastfailed_collectfailure.None_2": {"doc_hash": "9433b0019edd11fa687c8bc0fe894b47ca2413f97e19d2c388794bcae6c6542e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_failure_subset_TestLastFailed.test_lastfailed_failure_subset.None_3": {"doc_hash": "3b90325f9f0dea7f290de6c854acbfe92c1510e165bde4cf7c1359b62fa65a58"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_creates_cache_when_needed_TestLastFailed.test_lastfailed_creates_cache_when_needed.assert_os_path_exists_p": {"doc_hash": "ab0775a24a3e47ae7986562a6619dee622adfd0b2353e54d0b4cb4792067ed52"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_xfail_not_considered_failure_TestLastFailed.test_xfail_strict_considered_failure.assert_self_get_cached_la": {"doc_hash": 
"e03f8b0012fe38579687b161f68f906e98a5eeda73f9b11aec3d91aafc91f995"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_failed_changed_to_xfail_or_skip_TestLastFailed.test_failed_changed_to_xfail_or_skip.None_4": {"doc_hash": "427b1efa83d099fbb37eded40223677c424b3d8ea8dd424f9bde1bba3532cf61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lf_and_ff_prints_no_needless_message_TestLastFailed.get_cached_last_failed.return.sorted_config_cache_get_": {"doc_hash": "49e37fa0f3812ade8b03e1c8e68560362e2ca5259574b2d1e633380735231c44"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_cache_cumulative_TestLastFailed.test_cache_cumulative.None_4": {"doc_hash": "1429f1a4879c4513b6275b3db97e5f9378ef6b423d7b770198ce3df4e60e78a5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_no_failures_behavior_all_passed_TestLastFailed.test_lastfailed_no_failures_behavior_all_passed.assert_result_ret_EXIT": {"doc_hash": "877ca53f3ddfe2b4b35e04eaa420865d833d878b0be206614e4a6b1c2b1f2f6d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_no_failures_behavior_empty_cache_TestLastFailed.test_lastfailed_no_failures_behavior_empty_cache.None_3": {"doc_hash": "4bcb0ee198e4f111d387249e24d86e4f8df708511761d564d35c3c0f910be969"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestLastFailed.test_lastfailed_skip_collection_TestLastFailed.test_lastfailed_skip_collection.None_4": {"doc_hash": "312fe99634eab61c4e6744f4b3567c39c255d45faa58e8a04968f3d0b51137bd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewFirst_TestNewFirst.test_newfirst_usecase.None_6": {"doc_hash": "a438ba3a9754e86f765a8100ce02dfce127d843d1889c3c106f2dd5f67e884ca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestNewFirst.test_newfirst_parametrize_TestNewFirst.test_newfirst_parametrize.None_6": {"doc_hash": "c06786a3b3f6349ee807a763b15515b2fbeaad79af1f66dd57b38a22f2e076b7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_TestReadme_TestReadme.test_readme_failed.assert_self_check_readme_": {"doc_hash": "db1e3d54384fd09dbfa6322d3d1e8c1c6aeb15a213dae0b0c45613e18dc6ebdd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_test_gitignore_test_gitignore.None_1": {"doc_hash": "b856e1ed5d0eab6c25d46561342f4033d23e07c2901646549299a23bc9204b78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_cacheprovider.py_test_does_not_create_boilerplate_in_existing_dirs_": {"doc_hash": "77ef8390d82b68c0c94ba6114fbd1e1cad348647d96276da016bccc5b85175dd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py__coding_utf_8__StdCapture.return.capture_MultiCapture_out_": {"doc_hash": "f689bc9c42996870b8a37d14a96d4b6a56581d602965eb72d65759c43ab1ce61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureManager_TestCaptureManager.test_getmethod_default_no_fd.assert_parser__groups_0_": {"doc_hash": "d0d562f7ba0bba4b25714ba65a41107bac7e053ae90dc072f12d7f4d1e15f216"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureManager.test_capturing_basic_api_TestCaptureManager.test_init_capturing.try_.finally_.capouter_stop_capturing_": {"doc_hash": "1a41d76906737be3e558313f617cae5a784f41e8b6765f6e890891c319974101"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_unicode_test_capturing_unicode.result_stdout_fnmatch_lin": {"doc_hash": "68e1a3454122b90d6a72fc7f7908b8267ad1ebb6a041f7d5aa9e10b060af8ab4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_bytes_in_utf8_encoding_test_collect_capturing.result_stdout_fnmatch_lin": {"doc_hash": "2a3814d209a249030a822fc4ba06f23d8ef82b7ca20e9648cba883f1951fc1be"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing_TestPerTestCapturing.test_capture_and_fixtures.result_stdout_fnmatch_lin": {"doc_hash": "78139f35ecf9d916a463723d110dff4db7f21c76b8952436b4c2b310e51d44b7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capture_scope_cache_TestPerTestCapturing.test_capture_scope_cache.result_stdout_fnmatch_lin": {"doc_hash": "02ca20291dd0a115a8b22850f54dd7344367d97c6664a59eb16ef3547b63451f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_no_carry_over_TestPerTestCapturing.test_teardown_capturing.result_stdout_fnmatch_lin": {"doc_hash": "34a11694bbb79dd62e1a3dad40476c3df7b5453393bb0f7cd902c4968c9e7353"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_teardown_capturing_final_TestPerTestCapturing.test_teardown_capturing_final.result_stdout_fnmatch_lin": {"doc_hash": "68bdcf85953c9ce8bb05378b01b5742734076cd021f973995a912b0c3a71d08d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capturing_outerr_TestPerTestCapturing.test_capturing_outerr.result_stdout_fnmatch_lin": {"doc_hash": "b145db9b2aa4d090e662b864623a05b547cfd89d60e78c26d600679e215a8351"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction_TestLoggingInteraction.test_logging_and_immediate_setupteardown.for_optargs_in_captu.assert_closed_not_in_s": {"doc_hash": "dc614f4a6c66b6851766a5b171c94251812d22bbd62154c6dd3ba200ed22c6c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_and_crossscope_fixtures_TestLoggingInteraction.test_logging_and_crossscope_fixtures.for_optargs_in_captu.assert_closed_not_in_s": {"doc_hash": "50dfaf61fc36fe39b3cba7ef586249b943fb5139a94667a59879698688c6f7ee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_is_shown_TestLoggingInteraction.test_conftestlogging_is_shown.assert_operation_on_clos": {"doc_hash": "083f7555f268f039401ff9d4255ff4e00ec190ed96bee7f7c6b95566ab406141"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_and_test_logging_TestLoggingInteraction.test_conftestlogging_and_test_logging.assert_operation_on_clos": {"doc_hash": "27868ecf149a76ff1f5e7de3ded85b46e52579135b610a02d524f89045440868"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_after_cap_stopped_TestLoggingInteraction.test_logging_after_cap_stopped.assert_": {"doc_hash": "4652a3802dab271f0d37d987cadd13a861fa6796a6fd4bc8fe585123e4604295"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture_TestCaptureFixture.test_capsyscapfd.result_stdout_fnmatch_lin": {"doc_hash": "4252f48d2cd2d034959e96cb05727b2c1bd9b2b1ccaf6e2f200ae09e49ad26f6"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capturing_getfixturevalue_TestCaptureFixture.test_capturing_getfixturevalue.result_stdout_fnmatch_lin": {"doc_hash": "4850aa0f968fb99444049fa8d0f8ba2d222de8d49345c54645b020146f4270a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsyscapfdbinary_TestCaptureFixture.test_stdfd_functional.reprec_assertoutcome_pass": {"doc_hash": "0c0c849a8abb3ac4ab25adabfe89649ded6a57d8047f9c8da402df6132a45d0b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capfdbinary_TestCaptureFixture.test_capfdbinary.reprec_assertoutcome_pass": {"doc_hash": "85996bf13174e33fb1317ca4afd3d2ad600de17d493203aec741d79cd4c2b366"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_TestCaptureFixture.test_capsysbinary.reprec_assertoutcome_pass": {"doc_hash": "902b7dcf8f2241193ef559582e4c30e8ed0e62b571133344fc2dcfcd38ce65b5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_forbidden_in_python2_TestCaptureFixture.test_capsysbinary_forbidden_in_python2.result_stdout_fnmatch_lin": {"doc_hash": "acfb433231164b99f5f46dc06b9da05505ec455424a8b770e73d3060c30c417c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_partial_setup_failure_TestCaptureFixture.test_capture_and_logging.assert_closed_not_in_re": {"doc_hash": "3d3dd48e1d6cfc9f69c5e18e6d68d685dcd9be380315ab22e4a66ab5511e7a8b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_disabled_capture_fixture_TestCaptureFixture.test_disabled_capture_fixture.if_no_capture_.else_.assert_test_normal_execu": {"doc_hash": "37fe5e8c61ecbdb29f727e833bebc1f3ce9197cb6c5e91fcc01f1d34248889b7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_TestCaptureFixture.test_fixture_use_by_other_fixtures.assert_stderr_contents_b": {"doc_hash": "b4fb02dc43e7c2cd78f540f6341bd99f7c32a08b924c18ecf380c7f0b0bea6ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown.reprec_assertoutcome_pass": {"doc_hash": "1657b551f715357c6eff57235b24918389572c4f8224ce03fc7b109af772c4c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_setup_failure_does_not_kill_capturing_test_setup_failure_does_not_kill_capturing.result_stdout_fnmatch_lin": {"doc_hash": "f0759acbd52d26864feee5c4c1a6b770d7befb26592823db4e7faf0d1e9d36f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_fdfuncarg_skips_on_no_osdup_test_capture_conftest_runtest_setup.assert_hello19_not_in_r": {"doc_hash": "2de2b1f4022f6893faf9debb782f21d21ade92c86c2ef5d7ff9dfdd47e74f76b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_badoutput_issue412_test_capture_early_option_parsing.assert_hello19_in_resul": {"doc_hash": "dce91bdb25aeb77a3d8d7407204f857d752141fdb83412748e1c82ecd6b76615"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_binary_output_test_capture_binary_output.result_assert_outcomes_pa": {"doc_hash": "00047bf523de6c39296c59839772580a60db29bf94568bf6576ca00e7595eb0b"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_during_readouterr_test_error_during_readouterr.result_stderr_fnmatch_lin": {"doc_hash": "3393ba9bc496631542756d30e8f7ecc257294dab7e6651b5e231c79f54f80c96"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureIO_TestCaptureIO.test_write_bytes_to_buffer.assert_f_getvalue_f": {"doc_hash": "003175a0d4e3971cf66bf434cc81c41a0a1e6158c34a03dcf1c31f0e5eb85a75"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_test_dontreadfrominput_buffer_python3._just_for_completeness": {"doc_hash": "609cb2750490cc513aceca3b6043656463eddd9c6d41ba77b0e383bd4f07066e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_buffer_python2_test_dupfile.None_4": {"doc_hash": "04f01ed8518930928ca00fa4e384f90ba2992e9fdf3257010551b35a6150bd5f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dupfile_on_bytesio_test_dupfile_on_textio.assert_not_hasattr_f_na": {"doc_hash": "fa6567c424b17bd5dcb2f89fb26c0f660cebf73025a9640fe069bcc5c668b17b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_lsof_check_lsof_check.assert_len2_len1_3_o": {"doc_hash": "b724a028d1943880b55934238d91b668f5c09de9be1d0b58bda2d021c70f274c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture_TestFDCapture.test_writeorg.with_open_tmpfile_name_.assert_stmp_data2": {"doc_hash": "9159d6bba2683547258bc191daefbc0279f5e0fe53f7768984a9e16ac080fa0f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture.test_simple_resume_suspend_saved_fd.try_.finally_.os_close_new_fd_": {"doc_hash": "17972419849ba5bbba52b02771de9fab9df6f5c3ed24fba1dd48a96cf066a814"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture_TestStdCapture.test_reset_twice_error.assert_not_err": {"doc_hash": "f3884651e73ff27da219df13ac534c5993bebcaba4e74aacb21c5c7f57a0132f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_modify_sysouterr_in_between_TestStdCapture.test_capturing_modify_sysouterr_in_between.assert_sys_stderr_olde": {"doc_hash": "256eaff4fdcb792690f3ce48f2502dbad2705a777fb7c55fa93b86e06360624e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_error_recursive_TestStdCapture.test_stdin_nulled_by_default.with_self_getcapture_.pytest_raises_IOError_sy": {"doc_hash": "3a220a9e5516c917f285868f8b77bf607015e7ca73e79c8b3cb55b959d2927ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFD_TestStdCaptureFD.test_many.with_lsof_check_.for_i_in_range_10_.cap_stop_capturing_": {"doc_hash": "61e6ccd2fdaf519617da2c333b06aae4185404158e1b92474768dfd4f58ef499"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFDinvalidFD_TestStdCaptureFDinvalidFD.test_stdcapture_fd_invalid_fd.assert_result_parseoutcom": {"doc_hash": "3ef2f123a1224928486c7bc6981e8838928ec483c4e007e6321d03f571038798"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_not_started_but_reset_test_fdcapture_tmpfile_remains_the_same.assert_capfile2_capfil": {"doc_hash": "4d65a9a7b2262aaf97f7e2cbaf4ca1e65452472ac4cee4f0fc2a687b346c402f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_close_and_capture_again_test_close_and_capture_again.result_stdout_fnmatch_lin": {"doc_hash": "db779c115c23f5b35e27c0c29a7ca4db2591478c295d277a7dbf9d7ebdcfe0f0"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_and_logging_fundamentals_test_capturing_and_logging_fundamentals.assert_atexit_not_in_re": {"doc_hash": "bb41442698b909a125d54ea0a7fe6a7ed3539edffaffa44d47d67f8137a7dc43"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_attribute_issue555_test_py36_windowsconsoleio_workaround_non_standard_streams._py36_windowsconsoleio_wo": {"doc_hash": "16cfbef4119e1d7687252fa0f39e4cddbcd133f6c81561b60ddf0db2be79079c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_has_encoding_test_pickling_and_unpickling_encoded_file.pickle_loads_ef_as_str_": {"doc_hash": "f47ae0b8890b6c436f64bbe95c82d779a7f784f426932b6791fb60fb8000ccf5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_global_capture_with_live_logging_test_global_capture_with_live_logging.None_7": {"doc_hash": "678d13e3fa796fbeb71cc0228ced911a2be7ab717eb91401966d59392423a316"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_with_live_logging_test_capture_with_live_logging.assert_result_ret_0": {"doc_hash": "f11d1c78a63804d30219a27e58f69c99fc1c75ccae81d8c946ed086140d61492"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_typeerror_encodedfile_write_": {"doc_hash": "51fcdd8904a08fc79ee729066985d86f854642eca29f9322009abc8f4efe8c1d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_from___future___import_ab_TestCollector.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn": {"doc_hash": "ccea89db5bbbc2896c7b8cc4b43c93ff5f52eecf9294bc84d08ac59fec21fdc2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getparent_TestCollector.test_getparent.assert_parent_is_cls": {"doc_hash": "7b24f9c1ba4521a31cc4908f14fc6899b8065eabf4a7bfebccd541b3b01cfebd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getcustomfile_roundtrip_TestCollector.test_getcustomfile_roundtrip.assert_isinstance_nodes_0": {"doc_hash": "59439077589ae43cda8fd248d3fc8c297eec1ef4d3be6de80a01a40349efd0ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_can_skip_class_with_test_attr_TestCollector.test_can_skip_class_with_test_attr.result_stdout_fnmatch_lin": {"doc_hash": "76ba779613685faded3819b06a53daa489fa3adb94d77df6cfc709ed178ef946"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS_TestCollectFS.test_ignored_certain_directories.assert_test_found_in_s": {"doc_hash": "cae643717e76d93e542e511f2122ac2fa2639e5fa943eaca4d9d7e867e1a9d38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_TestCollectFS.test_ignored_virtualenvs.None_2": {"doc_hash": "8d05035f758c606432fd31455c639dad0a5673ae708cd5494c6b0a4f02afcf60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence.assert_test_invenv_in_r": {"doc_hash": "8835622b199f4098c153f9fa30fe5eb0ca6a5daea6ab900aef54b135d771e52b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test__in_venv_TestCollectFS.test__in_venv.None_1": {"doc_hash": "ac0e28d6bfd76b84f8d1e367844f99e9d8a9e5f58fd2da76dce456dee38a4d30"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_custom_norecursedirs_TestCollectFS.test_custom_norecursedirs.rec_assertoutcome_failed_": {"doc_hash": "06d9f8cfc2b33b0116835756227bbd62e98217756d4eb8f0168bf1adf82357ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_testpaths_ini_TestCollectFS.test_testpaths_ini.None_1.assert_x_name_for_x_in_i": {"doc_hash": "cab42652d17e920721224ef51b07c54ca5b9e69027fef6f24d93039758193e6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectPluginHookRelay_TestCollectPluginHookRelay.test_pytest_collect_directory.assert_world_in_wascall": {"doc_hash": "c4debb16f87fde1cd63dc9d889122f8c2213704e3f5fe1c1a4b56ac7ac465548"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback_TestPrunetraceback.test_custom_repr_failure.result_stdout_fnmatch_lin": {"doc_hash": "0867d6cb7f189a49a1951d757dc4cdb7c351be2ecb0b24c1aa1e80a8a389cf09"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback.test_collect_report_postprocessing_TestPrunetraceback.test_collect_report_postprocessing.result_stdout_fnmatch_lin": {"doc_hash": "fbe8de894b94faa2be5282aa78b859ad39668db1d0e7f6bfc2d74f6afd8612f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests_TestCustomConftests.test_ignore_collect_path.result_stdout_fnmatch_lin": {"doc_hash": "73af89fcbe9448d2354b5275abf038cb1f4d12e034304417f4c44b07b4597879"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_ignore_collect_not_called_on_argument_TestCustomConftests.test_ignore_collect_not_called_on_argument.None_2": {"doc_hash": "1f027a7fb8cf232b600082c91ab412a3aa9f9dd48cb5e06170f47178d060140a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignore_exclude_on_option_TestCustomConftests.test_collectignore_exclude_on_option.assert_passed_in_result": {"doc_hash": "390793437a4ba9d01951d9340f37cbea7cf9d1edd6ee88ffa597170549d78195"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignoreglob_exclude_on_option_TestCustomConftests.test_collectignoreglob_exclude_on_option.None_4": {"doc_hash": "0c25faa46c2d4a17838b088475fffaa67523ac7bd143fe9d2f9c434b396f8fae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen.result_stdout_fnmatch_lin": {"doc_hash": "5e0f3a015c12f48fe9a6b677f924ff0e3bcdcaa25d4afc379f76002124b334e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_collect_file_from_sister_dir_TestCustomConftests.test_pytest_collect_file_from_sister_dir.result_stdout_fnmatch_lin": {"doc_hash": "9542567d1d4814413e5d27133e01f8777ca9719a804b599471993e18fc36407b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession_TestSession.test_parsearg.assert_len_parts_2": {"doc_hash": "d77e6d17e34e41b04ae189fcd5b6c41116969b5d133a04f5dfff2b8e080de8ee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_topdir_TestSession.get_reported_items.return._": {"doc_hash": "a008bfe8313c6d0ade7168b4902bc0ad6662cd1f0ddeb1980e390cb437d113f1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_single_function_TestSession.test_collect_protocol_single_function.assert_x_name_for_x_in_s": {"doc_hash": "5a4e7b949ebe82b41e04977046335359e22c88a28ec4b78e4cd76d391316d492"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_method_TestSession.test_collect_protocol_method.for_id_in_p_basename_p_.assert_x_name_for_x_in_s": {"doc_hash": "b121b2f8e20468af4f9728ea58a2b5822eaae470acd6026bd34cef75fab7505c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_custom_nodes_multi_id_TestSession.test_collect_custom_nodes_multi_id.assert_len_self_get_repor": {"doc_hash": "d0e06dda28efeaad0635aee35071e0ce2f6eadfcf02b5c5a2bfbbef56305141e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_subdir_event_ordering_TestSession.test_collect_subdir_event_ordering.hookrec_assert_contains_": {"doc_hash": "b3cf296ed55d6d4661daf6265f26812ba53dff875dcc29c015578fd8718dbc36"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_two_commandline_args_TestSession.test_collect_two_commandline_args.hookrec_assert_contains_": {"doc_hash": "71236d71f53d92c9a30a56d6997b83b0142595601de8bb631f888de2e4bf0f3f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_serialization_byid_TestSession.test_find_byid_without_instance_parents.assert_x_name_for_x_in_s": {"doc_hash": "5cb6253b100d66e89167dba85aa2e9ed99d919b930464df792b0902257309f35"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_getinitialnodes_Test_getinitialnodes.test_pkgfile.for_col_in_col_listchain_.assert_col_config_is_conf": {"doc_hash": "4e37060fd3df0fe3281c66a85329fdb38251794aa5798dcfe292cd6e2c44c238"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems_Test_genitems.test_check_collect_hashes.for_numi_i_in_enumerate_.for_numj_j_in_enumerate_.if_numj_numi_.assert_i_j": {"doc_hash": "cd7aef2e1121569cce71d79fb920752712c062de07ca7fc4d3dc161a9ce565c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_example_items1_Test_genitems.test_example_items1.print_s_": {"doc_hash": "dc2d1ac3a6b78291278d4a781bf68f2a33dddeeebce311c266aed2e0f2a46185"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_class_and_functions_discovery_using_glob_Test_genitems.test_class_and_functions_discovery_using_glob.assert_ids_MyTestSui": {"doc_hash": "16d43252d2f0383fac5761781a7ce7160bbf0db0264517d8c86467fc31235239"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_matchnodes_two_collections_same_file_test_matchnodes_two_collections_same_file.res_stdout_fnmatch_lines_": {"doc_hash": "4d4d8a208ffe6dd8a518f53f3808d35cba8f45a54eb2e4308d51878fa1b1ce1a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestNodekeywords_TestNodekeywords.test_issue345.reprec_assertoutcome_pass": {"doc_hash": "0b2391964a816af7375e0a307c694dda37bc2a203e38837859cfa3ce63973a3a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_COLLECTION_ERROR_PY_FILES_COLLECTION_ERROR_PY_FILES.dict_": {"doc_hash": "1fc33287f72dfd72ab41c946242bfed3034b2dd11e1fdd0b1a9bd5097a74ccad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_error_test_exit_on_collection_error.res_stdout_fnmatch_lines_": 
{"doc_hash": "4a1b044fee0cdd088a31d5829f6c4ec392edfd5ac51fdb1f80671564b285229b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_smaller_than_n_errors_test_exit_on_collection_with_maxfail_smaller_than_n_errors.assert_test_03_not_in_r": {"doc_hash": "e1936f19502267ff178ac858093253347512bedccbe246516324d65ea2940da4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_bigger_than_n_errors_test_continue_on_collection_errors.res_stdout_fnmatch_lines_": {"doc_hash": "aef77e26b5ff4fe13f90602a38ca49a76a5b9d4d14157a3016f7e302cf33def4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_continue_on_collection_errors_maxfail_test_continue_on_collection_errors_maxfail.res_stdout_fnmatch_lines_": {"doc_hash": "57d8918c084343e7146eebf6d1667416c7226ab14cee249ad43bcbd90f0e7b56"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_fixture_scope_sibling_conftests_test_fixture_scope_sibling_conftests.res_stdout_fnmatch_lines_": {"doc_hash": "e74e9c67fa21aa0b248b3ccdb9ea37a5719b206dfdc1c20a3c1b608d72016553"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_init_tests_test_collect_init_tests.assert_test_foo_not_in_": {"doc_hash": "9f56ecf064c6c9ccdd642369a6c6f7e27a8f92f507e3632ec039779cfabe89e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_invalid_signature_message_test_collect_handles_raising_on_dunder_class.assert_result_ret_0": {"doc_hash": "b1c2424755a9c617f6688e7d9a69b007f2789dbbbff07ade5d65a6a48c68170d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_with_chdir_during_import_test_collect_with_chdir_during_import.None_4": {"doc_hash": "a157ee7c7346ae3f0a01a8c2bc0e4f9dbaaadee805dd9184ec0bcaaae9d516eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pyargs_with_testpaths_test_collect_pyargs_with_testpaths.result_stdout_fnmatch_lin": {"doc_hash": "8cbe0f0d857ac4bc230c62a2fb7d068078b6f63820af2250b038f9e287cd46d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_file_arg_test_collect_symlink_file_arg.assert_result_ret_0": {"doc_hash": "944551e042ae12d2631247898f654313f8fc296b7dcde599e8d933d486665d52"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_out_of_tree_test_collect_symlink_out_of_tree.assert_result_ret_0": {"doc_hash": "e34e331addbdc42d8f3dbe56dde173def3b7ffa601137c9cbb0b485343f525a6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collectignore_via_conftest_test_collectignore_via_conftest.assert_result_ret_EXIT": {"doc_hash": "fc12e257f749c9e4d4fa1645d453f9c8e24584356518bb0617e5070123648c76"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pkg_init_and_file_in_args_test_collect_pkg_init_and_file_in_args.None_3": {"doc_hash": "4eb52b634d0d0ea180ecdf1f19d3fc9ab191758ea11fe1e4c8aeebd837612cf3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_sub_with_symlinks_test_collect_sub_with_symlinks.result_stdout_fnmatch_lin": {"doc_hash": "f1e285475423fa413db2ee95238e9bce5e899860938893d4f084cf8cdf1c8f14"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collector_respects_tbstyle_": {"doc_hash": "a59fc23c2c3a83077c4e8432fffae3e696d5cfd1834b269b5850c062a69850fb"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_from___future___import_ab_test_is_generator.assert_not_is_generator_f": {"doc_hash": "dbc3366bdc6a3654dad3b69cc598bfe746758907d87fed615ff02421e23bc6c0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_real_func_loop_limit_test_real_func_loop_limit.with_pytest_raises_.get_real_func_evil_": {"doc_hash": "d93123f4551cbd8c1fef7667e5e9c732d5047b6f7fe16503d830c33b8e823526"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_get_real_func_test_get_real_func.None_2": {"doc_hash": "db76886e259fa12f89ca49952a884368ae649f0435df91d9b6070a3f71ea8fb0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_asyncio_test_is_generator_asyncio.result_stdout_fnmatch_lin": {"doc_hash": "823a938ec2944cf3d2c49f20315c718186145d02121dd9a4031b5c9e650fb9eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_async_syntax_test_is_generator_async_syntax.result_stdout_fnmatch_lin": {"doc_hash": "72c4d722b0ac8111caa12b4c60e906df611f09cd5c22c300caadce24fac10dc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_ErrorsHelper_": {"doc_hash": "d92225d1e3f04bd459a96f6473f70ea95b93cb2081f6ef11aee688bc49b1e43c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_from___future___import_ab_None_16": {"doc_hash": "5a95d7209bb743aeca353207d4b1c4690e5fc9e9711595f00a73a13067c0d778"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni_TestParseIni.test_getcfg_and_config.assert_config_inicfg_nam": {"doc_hash": "cea915ed96b70e145ac3885ae129fc828d20697e99e4666585ddcae054291207"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_getcfg_empty_path_TestParseIni.test_setupcfg_uses_toolpytest_with_pytest.assert_result_ret_0": {"doc_hash": "d8a540cdc3b627ff4b3970afe13127a77bc6865458d155aaded7e6d681401a7d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_append_parse_args_TestParseIni.test_tox_ini_wrong_version.result_stderr_fnmatch_lin": {"doc_hash": "683c410b211e421c13e81d754d191528921058fb8e630b2f082893c3ccb84485"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_ini_names_TestParseIni.test_ini_names.assert_config_getini_min": {"doc_hash": "c05750be76bfa91d61aca3efc18461f7ad98869577caf16097f3146504f19e90"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_toxini_before_lower_pytestini_TestParseIni.test_confcutdir.assert_result_ret_0": {"doc_hash": "d663f4b21742938bc83a2ba3f64676904abec44eb7667e7e5a2e6244390c1704"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigCmdlineParsing_TestConfigCmdlineParsing.test_absolute_win32_path.assert_ret__pytest_mai": {"doc_hash": "f611916dfc9aec1a5d2c438388d3c7cc269e841e583ffd21ba96c83918f38d22"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI_TestConfigAPI.test_getoption.assert_config_getoption_": {"doc_hash": "9007b1dd77220285d6440d09dfb396c0db601bbd56d19b459e713efbac6a89cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_getconftest_pathlist_TestConfigAPI.test_addini.pytest_raises_ValueError_": {"doc_hash": "28772e3e6f45c07e33a0f9f956e2478a02b0e8f90bf831fa71e237c8c26702a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_pathlist_TestConfigAPI.test_addini_pathlist.pytest_raises_ValueError_": {"doc_hash": 
"88957c03d639eb3ace7e354f31680b5abca0509fe1a101eb3c1d684c24db37e4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_args_TestConfigAPI.test_addini_args.assert_values_list_12": {"doc_hash": "8a7793237698b6c6be3a26a26372df0132e7e2806a5e981bf6f85dd66ae271f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_linelist_TestConfigAPI.test_addini_linelist.assert_values_": {"doc_hash": "c314cb9ceb5d4bc6c3e23644be38c7dbe52324d76d33172ffe9349b3e36c68e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_bool_TestConfigAPI.test_addini_bool.assert_config_getini_str": {"doc_hash": "117c1bd9844d323e7a8f6d478d2ca572e4382833fe77a6d1dd359b8cd3df488f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_existing_TestConfigAPI.test_addinivalue_line_existing.None_3": {"doc_hash": "69a8e1e709cc66f5f1ff944ebf6790355fbba49fb6062548b9253ba6152cd25c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_new_TestConfigAPI.test_addinivalue_line_new.None_4": {"doc_hash": "355d8a3f921911f853d5796fe9f55a6502f8ccaa2538513394293252c5be53a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_confcutdir_check_isdir_TestConfigAPI.test_confcutdir_check_isdir.assert_config_getoption_": {"doc_hash": "d9d39c98856ce862351f92979dcf4b2a69c95c336ddcdfa7e4dca860e8553fb5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_iter_rewritable_modules_TestConfigAPI.test_iter_rewritable_modules.assert_list__iter_rewrita": {"doc_hash": "61eba34f7c9e7d566517e3e60efae1b1b089168cbbfa41aaeb6eedc37c6e92a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs_TestConfigFromdictargs.test_origargs.assert_config_option_capt": {"doc_hash": "3f71d7dce72fb89ce23d1f9b066a669f84ab6ef6bfecbc77f245df5861d04724"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs.test_inifilename_TestConfigFromdictargs.test_inifilename.None_5": {"doc_hash": "646b1687dd27f7750f4b0bba1e5369d363c20e323cfc6f232e44591d9fd547e9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_options_on_small_file_do_not_blow_up_test_options_on_small_file_do_not_blow_up.for_opts_in_.runfiletest_opts_path_": {"doc_hash": "d0a6f2adae76ee667b49ec6de616f30e5d81ed54aef6bbf0bf5f45240bc19717"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_preparse_ordering_with_setuptools_test_preparse_ordering_with_setuptools.assert_plugin_x_42": {"doc_hash": "3d9b5b692024bc6063fd7527054166f644eb2fd5bb8810326ee61566d42e2750"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_setuptools_importerror_issue1479_test_setuptools_importerror_issue1479.with_pytest_raises_Import.testdir_parseconfig_": {"doc_hash": "c63d5caadf35be01038764414a8ccc3e382d86a98f83d9bdba8c417328a0406d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_plugin_preparse_prevents_setuptools_loading_test_plugin_preparse_prevents_setuptools_loading.if_block_it_.else_.assert_": {"doc_hash": "c995b320b8be134769e578c112985a884c9a535fe70a7f97da2d4cf16991966d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_disable_plugin_autoload_test_disable_plugin_autoload.assert_has_loaded_shou": {"doc_hash": "7c0201c7caf87c673ca54fe8968b00fa893a3592df418e62f88d25b984808534"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_cmdline_processargs_simple_test_invalid_options_show_extra_information.result_stderr_fnmatch_lin": {"doc_hash": "51560f1587e5fcace065007529d9f7a20ba839829ecc0ce20faaab66619a8fec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_consider_args_after_options_for_rootdir_test_consider_args_after_options_for_rootdir.result_stdout_fnmatch_lin": {"doc_hash": "6275f6307ba02b3157512dbaf88f998b43f882dd729a171b912bec6cd9165261"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_toolongargs_issue224_test_config_in_subdirectory_colon_command_line_issue2148.assert_result_ret_0": {"doc_hash": "d6f88dc019260207229ba8d31ea4b08d457ccf7ddc2da6630ba19a4c096ef33b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_notify_exception_test_notify_exception.None_5": {"doc_hash": "140ca304c41ead89a816e98af5013a9381df3f799b1f5321b12b85679fd13dcf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_load_initial_conftest_last_ordering_test_get_plugin_specs_as_list.None_5": {"doc_hash": "fbb8f930bfae4342d1e5717ea40a76fb94d4abdfba53782b3bdd72b275b8c86c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_collect_pytest_prefix_bug_integration_test_collect_pytest_prefix_bug.assert_pm_parse_hookimpl_": {"doc_hash": "af7d0c9829ed886506269ef19572810b1a96cc2ea2ef7cdda4b82b889bda9396"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir_TestRootdir.test_simple_noini.with_tmpdir_as_cwd_.None_2": {"doc_hash": "3fd43bb9161b6c5ae29811d7c601d48faa826e5d5b9fc8ca1e52707bf563df62"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_with_ini_TestRootdir.test_with_ini.assert_inifile_inifile": {"doc_hash": "65369467ed559528f6439b32d89b0a321bde7db94a152b798b5b058a22b8a819"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_pytestini_overrides_empty_other_TestRootdir.test_with_specific_inifile.assert_rootdir_tmpdir": {"doc_hash": "9e5570dd7876b8cd59db79a8f6c9721a68fba63e35924c907f5a9a674e6dbe36"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs_TestOverrideIniArgs.test_override_ini_names.None_4": {"doc_hash": "9921a2df0e9d9ace672c38db790d4eb85f3e3c8216120aafbb6f06a62697fa3a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_pathlist_TestOverrideIniArgs.test_override_ini_pathlist.result_stdout_fnmatch_lin": {"doc_hash": "02687403791848e4ada7ec30b4af2b9ee001d17d4776d31cd53f67b632410254"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_multiple_and_default_TestOverrideIniArgs.test_override_multiple_and_default.result_stdout_fnmatch_lin": {"doc_hash": "d295ee17528296e327bc9adf3891e7b17e150dbc31e6bdd4761d653b6590d33f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_usage_error_bad_style_TestOverrideIniArgs.test_override_ini_handled_asap.result_stdout_fnmatch_lin": {"doc_hash": "3e89d4b954f178cf1f3a4bb08dc915d7a97a15131272b6e5f6bd9da3e68b28cb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_with_arg_outside_cwd_without_inifile_TestOverrideIniArgs.test_addopts_before_initini.assert_config__override_i": {"doc_hash": "74909f756ad77fa57d48d193f84ceae97333f96d4463cfe3474063a3fdfd4638"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_env_not_concatenated_TestOverrideIniArgs.test_addopts_from_env_not_concatenated.assert_": {"doc_hash": "7803a193946893cdef86c4c712d87b5aa41d26cc2c3a4e84bf4d8a0d9b2656c9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_ini_not_concatenated_TestOverrideIniArgs.test_override_ini_does_not_contain_paths.assert_config__override_i": {"doc_hash": "4a3846ef123ad55c30d6bfbe28f01d607f2b5b2b7cc00113f971717c18c8e680"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_multiple_override_ini_options_test_help_via_addopts.result_stdout_fnmatch_lin": {"doc_hash": "c9d7c9a4a09091f28f1822a71d8000fde07c5c1782bca67705cc7234a4541968"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_help_and_version_after_argument_error_test_help_and_version_after_argument_error.None_2": {"doc_hash": "c8c0208dc39c71e69bf349b57fa67285aaa671f10ae56c3f3bf7b5102ef9292e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_does_not_load_blocked_plugin_from_args_test_config_does_not_load_blocked_plugin_from_args.None_1": {"doc_hash": "0bc7570600dadb3da594e08f4a914c124ae11563c45dc996654b8860c9536b22"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_blocked_default_plugins_": {"doc_hash": "048b36ea6ab9b36496e3a6b3e4b967540628a80d5fdd733ef75740655f7346c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_from___future___import_ab_conftest_setinitial.conftest__set_initial_con": {"doc_hash": "98a63d8430c637094b22b119811e079b88df87700719a688c2cab68b0399e3ca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal_TestConftestValueAccessGlobal.test_basic_init.assert_conftest__rget_wit": {"doc_hash": "31ad7c469ee38bffd15c636c060e9591931e5f2cda6b68383c4130e2ed7ed216"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same.None_3": {"doc_hash": "da84c2d69502f751b26e1ecfae83c37a97e9c311a8c418781165ea0d14ddad8c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_not_existing_TestConftestValueAccessGlobal.test_value_access_by_path.None_1": {"doc_hash": "c4c4288cd7b2dc87c075527b34f78542f6297bb1e4dee9433cd7c1b8192d5085"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_with_confmod_TestConftestValueAccessGlobal.test_value_access_with_confmod.assert_path_purebasename_": {"doc_hash": "335df73030cac1c63191464cbbb10574e2e53b73407382c798aec1730f25ecd5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_in_nonpkg_with_init_test_conftest_in_nonpkg_with_init.ConftestWithSetinitial_tm": {"doc_hash": "87a015b026594c65c1ff287014cac0782103620c8a44153def6e88e6c3433a35"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_doubledash_considered_test_issue151_load_all_conftests.assert_len_d_len_name": {"doc_hash": "7dc4ad3f454e4a0bea1aadc758295cba11da51eb901cb2bbc0f72cb3346ed39b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_global_import_test_conftest_global_import.assert_res_ret_0": {"doc_hash": 
"484bc43c54568c261fd6431d26d168fba4b9441e940ca6fc473e0d26380fe4d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_test_conftestcutdir.None_5": {"doc_hash": "431c87ff2974871b984a112ccefccfc4535eba9bc00862c8445ddbc1347407a9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_inplace_considered_test_setinitial_conftest_subdirs.if_name_not_in_whatever.else_.assert_len_conftest__conf": {"doc_hash": "1092b2b89c2f73f58d3a081fbd1aba8bd9b965a5e5a2492d95dcb5090f95977d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_confcutdir_test_conftest_confcutdir.assert_warning_could_no": {"doc_hash": "972338358d19f087bb926cd39a63536ef753d8cce391778911d8f65298bdb1ae"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_test_conftest_symlink.None_2": {"doc_hash": "f4d840fb5b788042a766ec5fc0938b567483234cb6a5b8ea3babb7e8ee667180"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_files_test_conftest_symlink_files.assert_result_ret_EXIT": {"doc_hash": "97f96aeb194053048cdb2c14b5d2ebadfdca7b935dd5284dbe711882cbe4b252"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_no_conftest_test_conftest_existing_resultlog.result_stdout_fnmatch_lin": {"doc_hash": "f66d6c325bab0868b7071fa783554a3cb4b70cb83f736aee28516f164d67dc31"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_existing_junitxml_test_conftest_existing_junitxml.result_stdout_fnmatch_lin": {"doc_hash": "65584770ed2d587793076f7404d5f10bda56365b3487717a0108802e09f4911e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_import_order_test_conftest_import_order.assert_conftest__getconft": {"doc_hash": "eb935e23c8135d48b137e738361b2d84b1584c6bf56b31fd4ed5dcbd63b22340"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_fixture_dependency_test_fixture_dependency.result_stdout_fnmatch_lin": {"doc_hash": "4f7ed76e080116cc73e4a42a6c958084895eb3478e5708ff138e1527ca05a6bf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_found_with_double_dash_test_conftest_found_with_double_dash.result_stdout_fnmatch_lin": {"doc_hash": "589c628adec0c95b3adc29e5852270e76937e1d89a370edd185656b476b5cfc8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility_TestConftestVisibility._setup_tree.return._runner_runner_packa": {"doc_hash": "006a44a0bc31859e9b49910f3c482cedc79ce00b85c889f6fb1167b173e10df4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility._N_B_swc_stands_for__TestConftestVisibility.test_parsefactories_relative_node_ids.with_dirs_chdir_as_cwd_.reprec_assertoutcome_pass": {"doc_hash": "90e931e556b0b4cdd16eb0f95d201495194c800924692d841b98b8859014f319"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_search_conftest_up_to_inifile_test_search_conftest_up_to_inifile.result_stdout_fnmatch_lin": {"doc_hash": "78386e5c1ef79263dd62c3f27a0613ac17983f2c3a7852e444e33ef2eeeda89b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_issue1073_conftest_special_objects_test_conftest_exception_handling.assert_raise_ValueError_": {"doc_hash": "b27818db82636d9cbbbd755742fc0b9807db9eba32eafa66516d30643e93cbf1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_hook_proxy_test_hook_proxy.result_stdout_fnmatch_lin": {"doc_hash": 
"aa039867fb0f858d79c9b2f27fff024cb5a4e86eb3a46bec6c94f963d4f91e3d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_required_option_help_": {"doc_hash": "592b7e43a2a1b449702998b2d1f7b0b8435f06d034a5500629adf059d749164c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py__encoding_utf_8_TestDoctests.test_collect_testtextfile.assert_len_items_0": {"doc_hash": "468181e860c7e0f2e4b9d13729e61a9bd9b73f45599bf931a41a43774b34fbb4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_empty_TestDoctests.test_collect_module_single_modulelevel_doctest.for_p_in_path_testdir_t.None_3": {"doc_hash": "cf302b305642d94174c1924eae9fbe785848efb6ff9d6db727085abaa33b374a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_one_modulelevel_TestDoctests.test_collect_module_two_doctest_one_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is": {"doc_hash": "3fa66b2b48d9e4f767443fcd0c7c84c1fef7d4a449a767323fcb8647cbfae03a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_no_modulelevel_TestDoctests.test_collect_module_two_doctest_no_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is": {"doc_hash": "2d1ce88fe88d77f8cacd576c276292041551edfa24c4a000011b754aea3ae2db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_simple_doctestfile_TestDoctests.test_new_pattern.reprec_assertoutcome_fail": {"doc_hash": "555e81f8dc46edc4775a634f4e54960a8355ceaded81daf19b9baa9c94d53183"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_multiple_patterns_TestDoctests.test_multiple_patterns.None_4": {"doc_hash": "2e2e68fafe38f2f1284d0e3a4adf0649f055fe4c9d2397898fb542a8bdaee07d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_encoding_TestDoctests.test_encoding.result_stdout_fnmatch_lin": {"doc_hash": "a7055e0d1b0eb4bdeac3ea5a1aeb5501960aaff9518adb9c915ca90e168b6599"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unexpected_exception_TestDoctests.test_doctest_skip.result_stdout_fnmatch_lin": {"doc_hash": "d5eeb9a2c6904a306e5b2f859f5b70862edca14ca49bfc39e8fc3cf7bf2ef080"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_partial_context_around_error_TestDoctests.test_docstring_partial_context_around_error.assert_text_line_after_": {"doc_hash": "f90546c7973f2a1d2592669e1c38b95158e118d044cb0dbf3348f84cea7c0730"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_full_context_around_error_TestDoctests.test_docstring_full_context_around_error.result_stdout_fnmatch_lin": {"doc_hash": "90df6bfa9e0a8b7abfedc2161de42ee310d9481646650450ee5165c4800eea8d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_linedata_missing_TestDoctests.test_doctest_linedata_missing.result_stdout_fnmatch_lin": {"doc_hash": "9b3c3a861a7dcb7120f3cc8aa1ea54e745581486601469c1eceab87132f27ec2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_only_txt_TestDoctests.test_doctest_unex_importerror_only_txt.result_stdout_fnmatch_lin": {"doc_hash": "beb19389516a6b0ca02e54a366dbcfc1fb8917861f7d180c591753395eeb9894"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_with_module_TestDoctests.test_doctest_unex_importerror_with_module.result_stdout_fnmatch_lin": {"doc_hash": "17f420fbbf7a7a72e3d3dc236596176a682ea204818587077614a5226c2568d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_TestDoctests.test_doctestmodule_external_and_issue116.result_stdout_fnmatch_lin": {"doc_hash": "2fee625a688c59284ddc33cab2954d82979b266e41ae7ed6ecbe1ada35c6a85c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_failing_TestDoctests.test_txtfile_with_fixtures.reprec_assertoutcome_pass": {"doc_hash": "98d0acee99d7b478e2aad315236a1fa8adbbf64ec671098117e4563359130978"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_with_usefixtures_in_ini_TestDoctests.test_doctestmodule_with_fixtures.reprec_assertoutcome_pass": {"doc_hash": "8ac434e3bfdcc50adb69d3acc390c61acfbd047b83488cbde6fc57f1b098ad32"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_three_tests_TestDoctests.test_doctestmodule_three_tests.reprec_assertoutcome_pass": {"doc_hash": "7e21592260d7b35732900ff43ef98ce36b69f059f804a209b37eee848c364944"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_two_tests_one_fail_TestDoctests.test_doctestmodule_two_tests_one_fail.reprec_assertoutcome_fail": {"doc_hash": "07ede6c2ba7740d421c827c81be858c56145c37266223444718ffa130ebd0d04"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_TestDoctests.test_ignored_whitespace.reprec_assertoutcome_pass": {"doc_hash": "2756551b491f8ff469d1d74b6f61ae3976e071ae4ea4f1505bd7ddbf58cd3cbd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_TestDoctests.test_non_ignored_whitespace.reprec_assertoutcome_fail": {"doc_hash": "c9903ea39184d5746d6da40b3a371f086f96f8ac470b1a790a8c3088f3adc03a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_glob_TestDoctests.test_ignored_whitespace_glob.reprec_assertoutcome_pass": {"doc_hash": "dca0f15d3fe711dfdc87374bd1834f10ff408546dfcd5165b2586943d06628a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_glob_TestDoctests.test_non_ignored_whitespace_glob.reprec_assertoutcome_fail": {"doc_hash": "a30d234223345519e3f52814f7f449e05572fd7591e50728121ce7d6f30bb496"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_contains_unicode_TestDoctests.test_contains_unicode.result_stdout_fnmatch_lin": {"doc_hash": "500ce395c61afc2f4913f62430dcc39407ddb2f4b0a71941858724edfcee4b10"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignore_import_errors_on_doctest_TestDoctests.test_ignore_import_errors_on_doctest.reprec_assertoutcome_skip": {"doc_hash": "09ad4699416faf2bfb0c2562ed1985751ead2038ef4da07466673feafa45bed0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_junit_report_for_doctest_TestDoctests.test_junit_report_for_doctest.reprec_assertoutcome_fail": {"doc_hash": "d1401fc543981e6313aededa463cb9a9fd9c95b51d9257be40ed79a45e66712b"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_TestDoctests.test_unicode_doctest.result_stdout_fnmatch_lin": {"doc_hash": "6c3fa6a50f5b5b717ca7040cd91017fd1105f100d48e7bc5422aa99e52e051a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_module_TestDoctests.test_unicode_doctest_module.result_stdout_fnmatch_lin": {"doc_hash": "e6cbd4e5b02fe6414b4720a5dd6db03f188fafda8d23caeb7149429094f2f2ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_print_unicode_value_TestDoctests.test_print_unicode_value.result_stdout_fnmatch_lin": {"doc_hash": "4918c485b897253574804d81d44bae6d6c94c411846e79e5c15b56b2c2fa4295"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_reportinfo_TestDoctests.test_reportinfo.assert_reportinfo_1_1": {"doc_hash": "9eb1a1300509f4d952e8819d16dfda88c4b9d2a968c1c2a46492d5937043e6ad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_valid_setup_py_TestDoctests.test_invalid_setup_py.result_stdout_fnmatch_lin": {"doc_hash": "c2bc1310c69fad097970ea5bbcc0d7a1f6a6d405b75d94490badf3a71952ccba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals_TestLiterals.test_allow_unicode.reprec_assertoutcome_pass": {"doc_hash": "5b480d10cf9c3901ebce7358e7fc26b685ae7c2058d8faa26e6ba63745d00d81"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_allow_bytes_TestLiterals.test_allow_bytes.reprec_assertoutcome_pass": {"doc_hash": "ec1c5e8939d3190ff04f5c690b7da94161e4939b49bd87a9f55856cddde8c4e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_unicode_string_TestLiterals.test_unicode_string.reprec_assertoutcome_pass": {"doc_hash": "ade753800717f1537a412af0e9d24368ff57f753c484fcb87e5777479c745937"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_bytes_literal_TestLiterals.test_bytes_literal.reprec_assertoutcome_pass": {"doc_hash": "e2b42e105778aeddceb09d716833ccea216ae3cccabbdb943c0c608fb3cf79e2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips_TestDoctestSkips.test_vacuous_all_skipped.reprec_assertoutcome_pass": {"doc_hash": "9440086e77812287fd08ae7b6c4debb3a59d05325052d804c827a4db9a064a09"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips.test_continue_on_failure_TestDoctestSkips.test_continue_on_failure.result_stdout_fnmatch_lin": {"doc_hash": "ab0dcc1e80408b7232351f535890b6c211d06903413c57864557978110f3e1c1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures_TestDoctestAutoUseFixtures.test_doctest_module_session_fixture.result_stdout_fnmatch_lin": {"doc_hash": "0c3b067a69a2557b3fdea327bfb509658ae3e2a9911b95bf714bb045146dea51"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_scopes_TestDoctestAutoUseFixtures.test_fixture_scopes.result_stdout_fnmatch_lin": {"doc_hash": "897dbe66245715b28d9976d223c6b5703c7e2d0f43831049d9203107571c2e72"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes.result_stdout_fnmatch_lin": {"doc_hash": "68b25299146493c358f2117a31fd171a0a5d361026405a8d8cebd1113502d8ee"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_auto_use_request_attributes_TestDoctestAutoUseFixtures.test_auto_use_request_attributes.result_stdout_fnmatch_lin": {"doc_hash": "b4c60d519e6ca152d26430f909d557fcde97c93759bc54dcbd8b17bb619bb683"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture_TestDoctestNamespaceFixture.test_namespace_doctestfile.reprec_assertoutcome_pass": {"doc_hash": "de1db140d7fdcbf8729beb1a2735eedcded093efd5ad62b9f5657d3f516b97ce"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture.test_namespace_pyfile_TestDoctestNamespaceFixture.test_namespace_pyfile.reprec_assertoutcome_pass": {"doc_hash": "0c3bfe4d92fad7a8d53251a4a2b1b7cfe5767f7168a9644d9826c8c53f580dd4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption_TestDoctestReportingOption._run_doctest_report.return.testdir_runpytest_doct": {"doc_hash": "4a0b9a41bd8dd9aef79d72665e1883477176c837cf266560044803e8321e213c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_udiff_TestDoctestReportingOption.test_doctest_report_udiff.result_stdout_fnmatch_lin": {"doc_hash": "6aa3e892df9577c7465054baee3d344830f1fbbf36afa810da68d0eb34bac4a7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_cdiff_TestDoctestReportingOption.test_doctest_report_cdiff.result_stdout_fnmatch_lin": {"doc_hash": "5d813fabe8e266225ffb930e7495f005c10f2e8c78a18f46fdeac335e2a83e4b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_ndiff_TestDoctestReportingOption.test_doctest_report_ndiff.result_stdout_fnmatch_lin": {"doc_hash": "a4906243b94eb9a9264f901dac56e62ddb2be629d3cdfc700d2ac90971b35ce8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_none_or_only_first_failure_TestDoctestReportingOption.test_doctest_report_invalid.result_stderr_fnmatch_lin": {"doc_hash": "3ae542933e67d3e20b3eb9452e2fe50fc5acca81f7f2a1130caad35319fa62f2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_test_doctest_mock_objects_dont_recurse_missbehaved_": {"doc_hash": "1b681130255e2022123a5846bb0297c7930662402e34b3d65cca1ca347b6923a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_entry_points.py_from___future___import_ab_": {"doc_hash": "941d3de9381e4d88fe66597d008cee41bbd3cc3dba3b56d9252ca02bd499b8f5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_helpconfig.py_from___future___import_ab_": {"doc_hash": "1036a2ac98018acbce7455e7388071256c48f6a5d99f45f28aa0eac1f76baefd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py__coding_utf_8__assert_attr.assert_on_node_expecte": {"doc_hash": "2eab2b7f7b515e3646e76c67aedb8986b4c8653ed4f2b0d76d17a0a57f755c65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_DomNode_DomNode.next_siebling.return.type_self_self___node_ne": {"doc_hash": "18f7187e478ad99f392be711c8376b9f5ea34ad757a004534255a2d108d19760"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython_TestPython.test_summing_simple.node_assert_attr_name_py": {"doc_hash": "d74ddec559555e53aa1a1b492052e9eadacdf30af240b238e1310279b2df0e55"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_summing_simple_with_errors_TestPython.test_summing_simple_with_errors.node_assert_attr_name_py": {"doc_hash": "845e1bdeb535a5ac34d51e48b79bddc6019bea4e770f20f5ab814ef4517077ed"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_timing_function_TestPython.test_timing_function.assert_round_float_val_": {"doc_hash": "24cb0acb1307828ef2d5fe60068a9dd531ca6d8eb081eeec6f5f06a0fb970990"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_duration_report_TestPython.test_junit_duration_report.if_duration_report_to.else_.assert_val_1_0": {"doc_hash": "9a1e8093e03b6aa7deda555327563cc1313515f618551e1bc75784705358fd8b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_TestPython.test_setup_error.assert_ValueError_in_fn": {"doc_hash": "c8a597b3af15e528f3428d4f8a0efefe05cb7b1fdddb5712eb9aecb2eadadc41"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_teardown_error_TestPython.test_teardown_error.assert_ValueError_in_fn": {"doc_hash": "e8599548eef90a2c71a1a5ef24d1877b49724e9bfc88a08ec80ce1af27ba71eb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_call_failure_teardown_error_TestPython.test_call_failure_teardown_error.snode_assert_attr_message": {"doc_hash": "d304af3e698187ff6f84f0084efc5adf0a48ba2026f2485efa4163f7d71576f1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_skip_contains_name_reason_TestPython.test_skip_contains_name_reason.snode_assert_attr_type_p": {"doc_hash": "c5fcad3f04d6c2263824af897cfd145eeaff9c4ce847194a9e53e7d709995f6a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_contains_name_reason_TestPython.test_mark_skip_contains_name_reason.snode_assert_attr_type_p": {"doc_hash": "b01eadc1d27928ec29164d67e19b25a1e213c4a63c92f866f715b337021ad9ee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skipif_contains_name_reason_TestPython.test_mark_skipif_contains_name_reason.snode_assert_attr_type_p": {"doc_hash": "c83497dd0ca5a6cdd425dc11511c75ca1e7f3b3ee7829613be9617dc905f96b2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_doesnt_capture_output_TestPython.test_classname_instance.tnode_assert_attr_": {"doc_hash": "0f4bc94f83abf0d21cf4c571c80e35fec991b1064f603445279a18dd95b078e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_classname_nested_dir_TestPython.test_internal_error.assert_Division_in_fnod": {"doc_hash": "ea1be0328eefa4f8415ce55e884e4239f91ad344bc60b98b3ff640edc49ec1c6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_function_TestPython.test_failure_function.if_junit_logging_syst.elif_junit_logging_no.None_1": {"doc_hash": "2ecd59cf405cb0033332a3a2834736d623d23120752149fa3ab1bcce283ccb1d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_verbose_message_TestPython.test_failure_verbose_message.fnode_assert_attr_message": {"doc_hash": "bdcbd53afb5eb4a9013a259982f895df57b640fcd7f723d671da47022d7ccb76"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_escape_TestPython.test_failure_escape.for_index_char_in_enumer.assert_text_s_n_c": {"doc_hash": 
"b1748822fbbfd1e37103562743b150354d61a8c44a0a5f870ec45d4daa30b828"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_prefixing_TestPython.test_junit_prefixing.tnode_assert_attr_": {"doc_hash": "edf2d77262cf2b4260d8ac2e0b3f1629dd1f363262b1fdf56bfaad1eb0285f8d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_function_TestPython.test_xfailure_function._assert_ValueError_in_": {"doc_hash": "7ef27ab74b27811d756dde13aab74d5d647d75f503ba388375a967245f2c2202"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_marker_TestPython.test_xfailure_marker.fnode_assert_attr_type_p": {"doc_hash": "37e7cb508d52468ed96e210b095d0503688b68fb043b1c545bd506af7b5f2bab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfail_captures_output_once_TestPython.test_xfail_captures_output_once.None_2": {"doc_hash": "20d4cd50722c6eb8fe3fff988a1dc0cbbe8b2abc812391bb757c3f04050873b6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_TestPython.test_xfailure_xpass.tnode_assert_attr_classna": {"doc_hash": "ed418d019908a6e059d258b39cabff35445fea6056557ded6b19a0b2c7b1f5c6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_strict_TestPython.test_xfailure_xpass_strict.fnode_assert_attr_message": {"doc_hash": "f994a3d7303b7e0677be69685dfdf63a8139ce4c6ea6832d2c28acb965d0603a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_collect_error_TestPython.test_unicode.if_not_sys_platform_start.assert_hx_in_fnode_toxm": {"doc_hash": "a432cff6db550b0e2393b09212cd36ca0960db2276f3fef96e6d833a16a12868"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_assertion_binchars_TestPython.test_pass_captures_stdout.assert_hello_stdout_in_": {"doc_hash": "99669a5028b3606d367b5584fd9364fe1f744a984dae55843c59ee422a3bbd03"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_pass_captures_stderr_TestPython.test_pass_captures_stderr.assert_hello_stderr_in_": {"doc_hash": "26be447e75b423bcfe5ee66349f5feb8e9c4c8f7797c7b544a9c122d68c89b23"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stdout_TestPython.test_setup_error_captures_stdout.assert_hello_stdout_in_": {"doc_hash": "b01adae391e0bd43d132193205b9104668b5666f99c1c64b7eb32366ac3fe1f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stderr_TestPython.test_setup_error_captures_stderr.assert_hello_stderr_in_": {"doc_hash": "2def34ad5873c16778a90ec281116359a41c355a8b1d2b1535e5aa2e471316a9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_avoid_double_stdout_TestPython.test_avoid_double_stdout.assert_hello_stdout_tear": {"doc_hash": "2bfdbb9d24f430806d9cf8976009797a728c6c7ef9218cf1730307157ea87693"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_mangle_test_address_test_dont_configure_on_slaves.None_1": {"doc_hash": "9cc31dc6ecc98bbb6d823c5d786f18cbcacdeeb795d985d59bdda746133d8b46"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestNonPython_TestNonPython.test_summing_simple.assert_custom_item_runte": {"doc_hash": "b612a622caf3d5150c16f7792d5f136b4f921a4f866a6cf7e3eaf1f697fd23c4"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_test_nullbyte.assert_x00_in_text": {"doc_hash": "95abd6ab50bc59f2bd0f18c6241491bcf0848bc5fdfc2dc77a90c712d5282505"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_replace_test_nullbyte_replace.assert_x0_in_text": {"doc_hash": "685c876a20df9a8f8b04240b5ce52c31a9831a7ab5989b5cfb3396a55aa18981"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_invalid_xml_escape_test_invalid_xml_escape.for_i_in_valid_.assert_chr_i_bin_xml_": {"doc_hash": "641ca02037d3bc3d8991e2a487b948fd414f511a682f30a8d4de9ded5de8e740"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_path_expansion_test_logxml_path_expansion.assert_xml_var_logfile_": {"doc_hash": "e2bbbdce8f83977559996407f5f923cda7e98b9f6ccd27ee59c8813bb18baf47"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_changingdir_test_escaped_parametrized_names_xml.node_assert_attr_name_te": {"doc_hash": "19996079bdb905e9e1128c118edacf0be02c572af4748d56d005ea81454af4c7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_function_issue469_test_double_colon_split_function_issue469.node_assert_attr_name_te": {"doc_hash": "545cf6734bed5023162eb8f2585b91a690fbad54bf01c717d3ef42b96dcebe32"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_method_issue469_test_double_colon_split_method_issue469.node_assert_attr_name_te": {"doc_hash": "f32638e70bd50abcddee8f595bf96a34047c60f39808e077c6e22535ea89c55f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_unicode_issue368_test_unicode_issue368.log_pytest_sessionfinish_": {"doc_hash": "6156af2c643e294bc3ac6c0f4f29230fddf9393dcb20aa452dfc09cf0642085c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_test_record_property.pnodes_1_assert_attr_nam": {"doc_hash": "6313f5dc27066cf2573b508207267e1a6027252476aa0b6ddd3cea7f131bd1f0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_same_name_test_record_fixtures_without_junitxml.assert_result_ret_0": {"doc_hash": "8ab23ff81127736604e8694d8c0bdbc3c70b31212db6b1bb946a33d7eed124ab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_attribute_test_record_attribute.result_stdout_fnmatch_lin": {"doc_hash": "92f9b0516b2ae533585e58d02758abc1626ba60470e6c7dc672e71368f732819"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_fixtures_xunit2_test_record_fixtures_xunit2.result_stdout_fnmatch_lin": {"doc_hash": "4ecf97591bbf8206ab0df096b39368cd37ff4a329a766aa70c937bd102adc5aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_random_report_log_xdist_test_random_report_log_xdist.assert_failed_test_x": {"doc_hash": "2f7d9eeec3c975cb0c68c4c4ef2d785230997ca320e3c463b8111b2d7ee58874"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_runs_twice_test_runs_twice_xdist.assert_first_second": {"doc_hash": "11ad5fc85f256bfb8e12eb37915e16707efd87c0a21b2c9fde83e3ef442f8908"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_fancy_items_regression_test_fancy_items_regression.assert_items_": {"doc_hash": "a676eae070cab645b194e3bf8397fc67dffe4daf877cca6bf6d7438949651c11"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_global_properties_test_global_properties.assert_actual_expected": 
{"doc_hash": "6c03d5d7b30c5f9f966f222f423caeb2bd3f4111d65c9e35c690ad9c7494f287"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_url_property_test_url_property.assert_": {"doc_hash": "886c6dcb5e7cf6a242e92eab44bfafe5b23c85627f1e1525ab3d7f32dc90b779"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_set_suite_name_test_set_suite_name.node_assert_attr_name_exp": {"doc_hash": "268ddfd563d19060261276b7a0be5f4b57048f6820c5f8e4ee9a731296b950fd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_escaped_skipreason_issue3533_": {"doc_hash": "e4ae41ede5e808818429579335f43afe734137d11c1ab4eb769b9d8d1cf7968d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_from___future___import_ab_ignore_markinfo.pytest_mark_filterwarning": {"doc_hash": "85869d35f8291ae19169d27af36d2e223a8c86ea1a0205b266b550dde8d7ec19"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestMark_TestMark.test_pytest_mark_name_starts_with_underscore.with_pytest_raises_Attrib.mark__some_name": {"doc_hash": "3d8424c724911d506bfdc7053cd7548d134dcc8a1bd4758d441e167f97cd5e7b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marked_class_run_twice_test_marked_class_run_twice.rec_assertoutcome_passed_": {"doc_hash": "fd37f0810089c3d6d887810be4a933cf7c8f841eee508d5aae2b4198153d3780"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_ini_markers_test_ini_markers.rec_assertoutcome_passed_": {"doc_hash": "8185952f1405e5296393ebe2497a45b943636c4cf70350b9209b9b0e039ff40e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_test_ini_markers_whitespace.rec_assertoutcome_passed_": {"doc_hash": "c4a842a9184a499386cf73bdc6631f2ee0fe8779b8722a832dd3af64adcc5500"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marker_without_description_test_marker_without_description.rec_assert_outcomes_": {"doc_hash": "8dd4921be8ee4b8f3cc53b7a539c5f2d7685e2c3d46c8d862c3d44ce6a9aabe7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_with_plugin_in_current_dir_test_markers_option_with_plugin_in_current_dir.result_stdout_fnmatch_lin": {"doc_hash": "b6e4ccf6a57efcef7021f0d4cbbfb6f26d2b8993d1e44af082946b910f8df96e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_on_pseudo_function_test_strict_prohibits_unregistered_markers.result_stdout_fnmatch_lin": {"doc_hash": "6bf0d9139c92d9f35271756ec356d2788eba15bd7f8047acffe2bfbd84004242"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_test_mark_option.assert_list_passed_li": {"doc_hash": "d944d1ad698e69535b9ced3b505ca87641b3c8057cc8d841c6419a87f8bf90ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_custom_test_mark_option_custom.assert_list_passed_li": {"doc_hash": "aee38f3672a979599546df4151d2478f540cd005832fd32a60e924ae12025a14"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_custom_test_keyword_option_considers_mark.assert_len_passed_1": {"doc_hash": "c9305abda704c05c8f037576008b276422542daa1f8831f0cde16d75f7747a7d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_parametrize_test_keyword_option_parametrize.assert_list_passed_li": {"doc_hash": "39116e8c16830ff328158c718ab7451ee92e34c7e8f498de3b8cc7f2ee79ee4e"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_wrong_arguments_test_keyword_option_wrong_arguments.assert_expected_result_in": {"doc_hash": "4085b496bdbace525f8d04eaf1a364d3ebf85dc8803659cf02c7149f8c443dbb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collected_from_command_line_test_parametrized_collected_from_command_line.rec_assertoutcome_passed_": {"doc_hash": "dc3d77706f2f52386686eda2a9539ecdf9c2054e5b9eae94a554ce0c104d154b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collect_with_wrong_args_test_parametrized_collect_with_wrong_args.result_stdout_fnmatch_lin": {"doc_hash": "5b59034220d623ae165c1335e1ee835a91bcc9244c539ec1a5ba931227095bf4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_with_kwargs_test_parametrized_with_kwargs.assert_result_ret_0": {"doc_hash": "2d99feab059c8ae4c83f8feaf49280961fed1c11865f357568f5fe3c1f0405ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional_TestFunctional.test_merging_markers_deep.for_item_in_items_.assert_x_for_x_in_item_i": {"doc_hash": "31ae5d6db2a466f0dc4005721ea0d36b25e3538a135b089592d70c413aab025d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base.self_assert_markers_items": {"doc_hash": "581e8e35e722dad49d7ba3275a2f4d288ed5bdfcecdbe83ebb7f739104846a7d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_should_not_pass_to_siebling_class_TestFunctional.test_mark_should_not_pass_to_siebling_class.assert_list_sub_item_iter": {"doc_hash": "43dfcdcc0cd8e4922e579cfcdfc54bfbac2ed3f4d03224bfc8e42c246b5c75f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_baseclasses_merged_TestFunctional.test_mark_decorator_baseclasses_merged.self_assert_markers_items": {"doc_hash": "18262ebe1c74cef575ca44dc5cb806d84942c7e3592625e870780110f7af9445"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_closest_TestFunctional.test_mark_closest.None_4": {"doc_hash": "9847262867fab0d568ec5d096dee8740276436dafdf11cc46dd0f99fe4d76bb7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_with_wrong_marker_TestFunctional.test_mark_dynamically_in_funcarg.result_stdout_fnmatch_lin": {"doc_hash": "979e925cf4816b559504c6022583f9ceab285cb5d9f173d531e07ad5de4b4315"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_no_marker_match_on_unmarked_names_TestFunctional.test_no_marker_match_on_unmarked_names.assert_len_deselected_tes": {"doc_hash": "7b6de7278a050fdf9aaa5de9e6ac00d65bf83ad5f69a9dc03f70e8281b8e15e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_invalid_m_option_TestFunctional.test_keywords_at_node_level.reprec_assertoutcome_pass": {"doc_hash": "83cce92925474754f9bc38c5b75552e51821979c78be0bf965ecbbdd22d5a8db"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_keyword_added_for_session_TestFunctional.test_keyword_added_for_session.reprec_assertoutcome_pass": {"doc_hash": "cbcb64209cf0ef57e9b223ed0704c5972f43d3ed08827f217c971510e1afbeb4"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.assert_markers_TestFunctional.assert_markers.for_name_expected_marker.assert_markers_set_exp": {"doc_hash": "71e74099c67bec540c2cd368eaa3fea1f76e1c18c71bd5fffdd51433e2df1f48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_from_parameters_TestFunctional.test_mark_from_parameters.reprec_assertoutcome_skip": {"doc_hash": "a259f54eae45928cddeaa606310e0cb6c4481c55c9095271a2675c56cda16469"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection_TestKeywordSelection.test_select_simple.check_TestClass_and_test": {"doc_hash": "7ae7da1e591a11a69f7126912a977c0f4488595adda140a30c4d8801e25d05fd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_extra_keywords_TestKeywordSelection.test_select_extra_keywords.assert_dlist_0_items_0_": {"doc_hash": "fffaf311d608f4eeb1f5d170b7254839d2fbdf0b0362b1a5bf117f0d95771d59"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_starton_TestKeywordSelection.test_select_starton.assert_item_name_test": {"doc_hash": "f95645a55642eddfc5ba753cf7b2f2c112d67c4b1c18be17e8a599ae35c579de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_keyword_extra_TestKeywordSelection.test_keyword_extra_dash.assert_passed_skipped_": {"doc_hash": "8b1f48aa043e6ae7c31e946d2ecaf3105da06c8e0a5ee600367b4fe6da5e3e5a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_no_magic_values_TestMarkDecorator.test__eq__.assert_lhs_rhs_ex": {"doc_hash": "0ad3fa418d8b3726a599ef88b4ab50d3fcfd909da9fc7615a44c763fa948de2a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_marks_test_parameterset_for_parametrize_marks.if_mark_xfail_.assert_result_mark_kwargs": {"doc_hash": "461b4c92c594b1f97e09948ee8eb0ced8959d5ecca1d28df3f5e4bb5e5e16126"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_fail_at_collect_test_parameterset_for_fail_at_collect.assert_result_ret_EXIT": {"doc_hash": "e3f265aacbb1374110599948cd1974fff17227e5f8a8c4b3ad0af910f3d83191"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_from_parametrize_test_markers_from_parametrize.result_assert_outcomes_pa": {"doc_hash": "bab4a9b7726b54aedd07eb6e188b7c6d8f1d47d82e5ca56ee6276ba07562c10e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_id_requires_string_test_pytest_param_id_allows_none_or_string.assert_pytest_param_id_s_": {"doc_hash": "f2e7eb8b7912dee0857169345f7e1e118389e5177c880ad0ae39c3b78d307e08"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_warning_on_unknown_kwargs_": {"doc_hash": "c682dd9550e12a9581812da85708f66aa3e48fdbe6236dfed2d7af9356db9624"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_modimport.py_subprocess_": {"doc_hash": "c854d7736907b2f3393f98d340feb24456742b87d10599e189cecddeb0fb8c86"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_from___future___import_ab_test_setattr.assert_A_x_5": {"doc_hash": "bdf206b60797b5d7d016c1d44d4960c890c6fb258288ad93b7d12f01552b0470"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestSetattrWithImportPath_TestSetattrWithImportPath.test_delattr.assert_os_path_abspath": {"doc_hash": "41819238cb5332aa6a5921ea144586547443354f6a22d46eeb35c0cf02fb66b8"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delattr_test_delattr.None_3": {"doc_hash": "eea571a8971bc68d3d428e52922b1c8f0b1eaad3f7ff6255b3d20aa057f997bc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_test_setitem.assert_d_x_5": {"doc_hash": "376ede7c8d8abe99f369a226821b233488cb61d1bb0272a93fc3de1e2c1cf573"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_deleted_meanwhile_test_setenv_deleted_meanwhile.None_1.else_.assert_key_not_in_os_envi": {"doc_hash": "1be3f2af8c605c5048a4b7ffa3fa000fa7d5ffa680798416d7d87fc8616e7c94"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delitem_test_delitem.assert_d_hello_wo": {"doc_hash": "979260e1028e754ecd74bd5e51fc96817c0af36f75a5481aa4680bd667bcf057"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_test_delenv.try_.finally_.if_name_in_os_environ_.del_os_environ_name_": {"doc_hash": "bbb6aa778c606cb1940dfdbc97884bf525ccbec138ef762aef6a7fead4247c17"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestEnvironWarnings_TestEnvironWarnings.test_setenv_non_str_warning.with_pytest_warns_pytest_.monkeypatch_setenv_str_se": {"doc_hash": "4394fa46b99f69f17161ab5e3b9fa8975d678ca4601dc0d70e586072c98bc2f7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_prepend_test_setenv_prepend.assert_XYZ123_not_in_os": {"doc_hash": "5300a8a055b6e8826c76f76aaac4fdaa29ee564dbeb7f75bdb32a309a13079aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_monkeypatch_plugin_test_issue185_time_breaks.result_stdout_fnmatch_lin": {"doc_hash": "1bee3534269b28926c7de9f09aac3ae6b9b1626444cb2115ef2e340f9c47dd67"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_importerror_test_importerror.result_stdout_fnmatch_lin": {"doc_hash": "7fb1a5b0fae26eb55bee6fb11e59ddcbb35e1038a0b55862e225aeee00b8a02d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_SampleNew_test_issue156_undo_staticmethod.assert_Sample_hello_": {"doc_hash": "770e86f796ad0a1fa89bd654a5c31a879578c43456f6d2da5e5d1512be3ae0e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_undo_class_descriptors_delattr_test_context.assert_inspect_isclass_fu": {"doc_hash": "db8f0537681c46fbcbf49d282afb5199e3275614ff27bef931d4e056cd3fba0d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_syspath_prepend_with_namespace_packages_": {"doc_hash": "44f3676a6e876cdba8e958020359f074293d750055677b3e3afb5dc0d0218679"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_py_test_ischildnode.assert_result_is_expected": {"doc_hash": "7a2de75479b47d7f069174075b1253e90fdc5b7eaf4f4df150f54816455a22f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_test_std_warn_not_pytestwarning_": {"doc_hash": "6bcea2632663319f981491e1072ec7814f32ca81c2fe51f969367e7adebcb05e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py__encoding_utf_8_test_nose_setup.result_assert_outcomes_pa": {"doc_hash": "5ef2435a43932308ef8b8c3a4f9755dead226a2d4d3c519961be5b13df3476a3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_func_with_setup_decorator_test_nose_setup_func.result_assert_outcomes_pa": {"doc_hash": "c4e21d58305788a1b93f7528e5c1798f4167db844477f0ad042e3d10650fd9f9"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_func_failure_test_nose_setup_func_failure_2.reprec_assertoutcome_pass": {"doc_hash": "f0dca790bbc6772fc201fd1c1c518e1801e4b1ac95b54096b1730f74cb4cff49"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_partial_test_nose_setup_partial.result_stdout_fnmatch_lin": {"doc_hash": "b3e2cd9d2e60d15933b873cf8a92be5102265d0525e737d26b2e42e2c601467c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_module_level_setup_test_module_level_setup.result_stdout_fnmatch_lin": {"doc_hash": "bea9c396c45000b50099e570319b628b179ab72df97a2f044a2ce7f56cb295e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_style_setup_teardown_test_nose_setup_ordering.result_stdout_fnmatch_lin": {"doc_hash": "a02417d1e0133d9302b88eaaad19dc654ad1d8ea42db118335b779cb9bf45681"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_apiwrapper_problem_issue260_test_apiwrapper_problem_issue260.result_assert_outcomes_pa": {"doc_hash": "c19b2d47cb9c4c7fb0990479d661f072bd58ca49dd2c53c4c20de074f461a09f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_teardown_linking_issue265_test_setup_teardown_linking_issue265.reprec_assert_outcomes_pa": {"doc_hash": "4c7faf9a206a57a686829198a40937da600ec3312d00ca9d06cc5c642ef0fb8e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_SkipTest_during_collection_": {"doc_hash": "e887f4af5fb770affaa48bb72a5933af3c5b8d977afc23960c60f4e0d1970eb5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_from___future___import_ab_TestParser.test_argument.assert_str_argument_": {"doc_hash": "cc434e3b5654bbbba5e4d8d42522c8a3bfdc82d426b2e49a4af0328a1e29fdaf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_type_TestParser.test_argument_type.None_3": {"doc_hash": "6d2e39036b4eee50aa15b7c1e787cd619c54906095384057fbc923c94665b14d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_processopt_TestParser.test_parse_will_set_default.None_2": {"doc_hash": "07730d35cfa5908249c152c9da6458d60164678e0a3017a531ea0ba385c3b571"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_setoption_TestParser.test_parse_special_destination.assert_args_ultimate_answ": {"doc_hash": "302fafacd1810c71dde213ef388111ec8f4b4822d36389cfb7ebfaaba0e1af00"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_split_positional_arguments_TestParser.test_parse_split_positional_arguments.None_6": {"doc_hash": "7b0e8fbf6e20d380983a88dfe39c0579fc33212efc3af45aec384a7ee77ce1c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_defaultgetter_TestParser.test_parse_defaultgetter.assert_option_no_is_False": {"doc_hash": "ef6f40a2545c24fa167fe4ebdb7f84e72aec28bd40ecd46622ee03458d96cf09"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_helper_TestParser.test_drop_short_helper.assert_join_args_file": {"doc_hash": "daefe37f214dcdcd338b5a462c25fbad1ace383e8739f2272bd7e614e01d1ed5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_0_TestParser._testing_would_be_more_h": {"doc_hash": "5f1b5237acf79a99106ed66671b7240cf73d6bf903c0ab7276058566b94916ef"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_help1_TestParser.test_drop_short_help1.assert_doit_func_arg": {"doc_hash": "83e66f325c609bec050e40a73bba0a2aba3c2cc6efdb64fa0d5319c676fadc48"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_multiple_metavar_help_TestParser.test_multiple_metavar_help.assert_preferences_val": {"doc_hash": "3dede6b33a00b01ff9b04b9530e345dd26844afb94a09d8bf4bbf734c6b0a81f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_test_argcomplete_": {"doc_hash": "87c807cd427137ffc0492c93eec4544d251338c91b45edae8c3405f291c37bc0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py__encoding_utf_8_TestPasteCapture.test_failed.assert_reprec_countoutcom": {"doc_hash": "cd6dc3b4157f4d07354f53a06216144d1e3c3c3ec548125ee562ebc32357f7d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_all_TestPasteCapture.test_all.matcher_fnmatch_lines_": {"doc_hash": "ef92c5135b41bb80598daec8da2928f66e4b93c100fc00742fb7a6ea45120afd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_non_ascii_paste_text_TestPasteCapture.test_non_ascii_paste_text.result_stdout_fnmatch_lin": {"doc_hash": "799c27b7d5cbf07d77b3f02a291dbbb981c806a425ea367258c1c9f5a451c974"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste_TestPaste.mocked_urlopen.return.calls": {"doc_hash": "b7ae51203f73745e09695a242a2f05ea531106da4db247d84c7c79f4aa46e485"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste.test_create_new_paste_": {"doc_hash": "d66392acc392ed0368b5889e77ca587262db6f518510d4a94ec8f122e8bad043"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_sys_TestPort.if_sys_platform_win32.else_.drv2._d_": {"doc_hash": "c2fa522775673adc7df4df52739f1d92eb6a139f0c2d785315491bb8d66a9820"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_matching_TestPort.test_matching.assert_match_pattern_pat": {"doc_hash": "f3676a6edfa54fda7be63e321e8a5da7aa9d9799d76b7faeff609a30fbb16bca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_not_matching_": {"doc_hash": "47fa6b6611a656cdf129789990902eafae96a4090993ebf583221ca24735fe77"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_from___future___import_ab_custom_pdb_calls.return.called": {"doc_hash": "363cba02d31d2357985d87cb8568db3bf7dec00c3c7976203f3880f2e37b35a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_custom_debugger_hook_custom_debugger_hook.del__pytest__CustomDebugg": {"doc_hash": "5748abd824c3624101498859b799f9f86624f1121ca29978d69a1ae51934a0cd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB_TestPDB.flush.if_child_isalive_.child_wait_": {"doc_hash": "49dda30d2c173a71a936aa82af3097c52f28904c5a474b8d3ad8c3dd545b0c99"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_postmortem_TestPDB.test_pdb_unittest_postmortem.self_flush_child_": {"doc_hash": "ee4bae3b0a9da219a722eb70764e3718401d80d452bdbaade3941cb86b11eeee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_skip_TestPDB.test_pdb_unittest_skip.self_flush_child_": {"doc_hash": "0c112906835b6b8032763d88af2920569a480402a993577eb8886a2ac3524ba5"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_stdout_and_stderr_TestPDB.test_pdb_print_captured_stdout_and_stderr.self_flush_child_": {"doc_hash": "e06a6cf96fb5d876f3d80bcf164b26ffd588a91faf1758de8def6349ca5a293d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr.self_flush_child_": {"doc_hash": "2c90f4fe7867bbfeaeb37ebc545c4991135a0bbf94cb26ea43ffd45b0f78ed6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_TestPDB.test_pdb_print_captured_logs.self_flush_child_": {"doc_hash": "82640f1ac69f5d72b172784343ad75cbdd4f2fb7db03da5d4d08e49a2561117d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_nologging_TestPDB.test_pdb_print_captured_logs_nologging.self_flush_child_": {"doc_hash": "aa96dbd5341ea184b86ddf0122c420250545e9cde8383c582684a87a12bc981b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_exception_TestPDB.test_pdb_interaction_on_collection_issue181.self_flush_child_": {"doc_hash": "6f2c147ad69ce498872b901ca269207abaa8bf9f6ab60dbb31fc42ba36c86002"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_on_internal_error_TestPDB.test_pdb_interaction_on_internal_error.self_flush_child_": {"doc_hash": "074b4458c9f9e3b35dd88cdfced540985c79382e8584b9b1f0f35912515324d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_simple_TestPDB.test_pdb_interaction_capturing_simple.self_flush_child_": {"doc_hash": "54ddb0cd256cec6ce3cb819e88276a7878316df2d0b63bb35f60cbbcd2bdfd05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_kwargs_TestPDB.test_pdb_set_trace_kwargs.self_flush_child_": {"doc_hash": "28340d28ef530a9956e4d73d1916ad8c077b37e286b6a69111f0baf50674f6a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_interception_TestPDB.test_pdb_and_capsys.self_flush_child_": {"doc_hash": "cacf8c59740fc7df8bba786a9747a25cb275e010dbbe2e7676c11eddff2c3182"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_caplog_on_pdb_invocation_TestPDB.test_pdb_with_caplog_on_pdb_invocation.self_flush_child_": {"doc_hash": "ab0e8d9b2e973ae776aa38afa0fada2bb1d29b47a9392d6b119f7f340b4c26b0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_set_trace_capturing_afterwards_TestPDB.test_set_trace_capturing_afterwards.self_flush_child_": {"doc_hash": "1ba2c5a8beb3d9309c1458277afc79b5ebf77ae40b9de870fda8d095dd678879"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_doctest_TestPDB.test_pdb_interaction_doctest.self_flush_child_": {"doc_hash": "e9286583a0edc5331ba4a23a48cd412b3324b9b301d029c2c7411d0157c8ccfd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_twice_TestPDB.test_pdb_interaction_capturing_twice.self_flush_child_": {"doc_hash": "65adc472c698314acd15fc20b7eab860b539f419f5708a7f6531da93d23c1f1c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_injected_do_debug_TestPDB.test_pdb_with_injected_do_debug.self_flush_child_": {"doc_hash": "abde085e2e25fc1a4d59fc4005814d931a3e85f3a3bfd79354cff1d2063602cf"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_without_capture_TestPDB.test_pdb_without_capture.self_flush_child_": {"doc_hash": "ebf39128e49a568fd5d03865368280c86afb293e36faa117d1d1bb2bc321aa11"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_continue_with_recursive_debug_TestPDB.test_pdb_continue_with_recursive_debug.assert_1_passed_in_in_r": {"doc_hash": "2ba866d272a656282ef59efdf752236dd177e23f23d6f9ea8a0776e8fc33cc92"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_used_outside_test_TestPDB.test_pdb_collection_failure_is_shown.result_stdout_fnmatch_lin": {"doc_hash": "ab6d4e9e3b9c3ec30d5f5eb392624789f1dcf405a06ac1f8334a510461cd4042"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_enter_leave_pdb_hooks_are_called_TestPDB.test_enter_leave_pdb_hooks_are_called.self_flush_child_": {"doc_hash": "49fa2512eeb5edb05445558f9d5e72c0acf5782bdcbb8c4c4fd27a4c0ae0bb6e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_TestPDB.test_pdb_custom_cls_without_pdb.assert_custom_pdb_calls_": {"doc_hash": "0e3004b7f422ae3ef31d553a46b326961c76bf34c2182cd1cf74324c90678cc0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_with_settrace_TestPDB.test_pdb_custom_cls_with_settrace.self_flush_child_": {"doc_hash": "e3c42588fa0165fbc357ce8d8e74e9402f0dbfe648478dd10f15ea4714df27f3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints_TestDebuggingBreakpoints.test_supports_breakpoint_module_global.None_2.assert_SUPPORTS_BREAKPOIN": {"doc_hash": "483c3afef34a8c7247edc6e2174a3c3299b0c65dc0bb68df833f5bf68f4d0245"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure.result_stdout_fnmatch_lin": {"doc_hash": "c5294f6eac227f4156afb57f54f0b6ce49bca20b8d69dcd20157e462b1c3b9bb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_custom_cls_TestDebuggingBreakpoints.test_pdb_custom_cls.assert_custom_debugger_ho": {"doc_hash": "a9302141ac173d54763b30c7d292068d1dbdd79041de77d808734d3a85709190"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_environ_custom_class_TestDebuggingBreakpoints.test_environ_custom_class.result_stdout_fnmatch_lin": {"doc_hash": "8b4b9fda0586ecf272690f330cc4ffecda530158dfb172ed4eacf09a99b2edd1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpoint_interception_TestDebuggingBreakpoints.test_sys_breakpoint_interception.TestPDB_flush_child_": {"doc_hash": "861e2a85db6122496c88f9d12b9b1f6de223abedc55ba174fb425a9d4cf22f55"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_not_altered_TestDebuggingBreakpoints.test_pdb_not_altered.TestPDB_flush_child_": {"doc_hash": "9d24d8551ee0d2f673d0f4130c54e46c84c0d56407aadcce35dcedc01619e9da"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestTraceOption_TestTraceOption.test_trace_sets_breakpoint.TestPDB_flush_child_": {"doc_hash": "55c81d7cff4d2f00e9dbf0cd5593da7a1fca50082dfbfe1801aed7845c1f931a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_quit_with_swallowed_SystemExit_test_quit_with_swallowed_SystemExit.TestPDB_flush_child_": {"doc_hash": 
"b05fb268af10273a5d20b85888d894c100277789f4dcc6aebba36ee2220b621c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdb_suspends_fixture_capturing_test_pdb_suspends_fixture_capturing.assert_PDB_continue_I": {"doc_hash": "86325b05b2e04e96e4c106eb815fbf94931416d2d7c30187557c13d0124b30a5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py__encoding_UTF_8_TestPytestPluginInteractions.test_addhooks_conftestplugin.assert_res_11_": {"doc_hash": "ead8e4e9c8a3c39d2f60780b3271c804d1a654b5c3c9a69d861228aa457358bc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_addhooks_nohooks_TestPytestPluginInteractions.test_do_option_postinitialize.assert_config_option_test": {"doc_hash": "2d7d489d2dbc32892784be887927dbbe9e0509c68366992d7b205bb6ea563c4c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_configure_TestPytestPluginInteractions.test_configure.None_4": {"doc_hash": "90a1dc7010906c2aa08f5aa8f6c01f56309bd3440b6a8f35264899acc5ebcfe5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_tracing_TestPytestPluginInteractions.test_hook_tracing.try_.finally_.undo_": {"doc_hash": "7436f989d4d5d127517159ea5e1fd610d746b62dd7d96dbf48f0f4c123fb395f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_proxy_TestPytestPluginInteractions.test_hook_proxy.assert_ihook_a_is_not_iho": {"doc_hash": "8a5afce155f9e979f08f43e7d4c2a684e4fddd7f74507dcaa50e0c02c0357664"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_test_default_markers_test_importplugin_error_message.assert_in_test_traceback": {"doc_hash": "17e8d90c97d7607f15ffd7fef2c5c4261fbbd66498ee455f257a936f1314b4dd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager_TestPytestPluginManager.test_canonical_import.assert_pm_is_registered_m": {"doc_hash": "08d827ad7e5697e98104e398fe6def8702ff5094b5a4e3b349604505623065c5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_TestPytestPluginManager.test_consider_module.None_1": {"doc_hash": "b8fb80d2e7dfe62b1ec02730ccde8c51588a0cf7d7b7c0c874d8c2d6145c2a9e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_import_module_TestPytestPluginManager.test_consider_module_import_module.assert_len_values_1": {"doc_hash": "c87786a63fd1a0281097e7078bf0857fa6c38c993e769075f319041e46ca107c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_fails_to_import_TestPytestPluginManager.test_plugin_skip.result_stdout_fnmatch_lin": {"doc_hash": "afce93378fdaa01072d6f9a440aeadccc8aae87e8a009745730a46cd25b5961c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_plugin_instantiation_TestPytestPluginManager.test_consider_env_plugin_instantiation.assert_l2_l3": {"doc_hash": "aeb960b9d256cb45b48566f1e2045fdfd2b4d3b1175920fcc7da33257fc988cb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_pluginmanager_ENV_startup_TestPytestPluginManager.test_pluginmanager_ENV_startup.result_stdout_fnmatch_lin": {"doc_hash": "1d2c3de2050748413e5a939f6976bb6c3870c1c4fdbc809c58a99610087f39c4"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_importname_TestPytestPluginManager.test_import_plugin_importname.assert_plugin2_is_plugin1": {"doc_hash": "cf4777304d55d24459d8240d5ab4694bdcf0a10728c1d70ab831ea0832a7091e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_dotted_name_TestPytestPluginManager.test_consider_conftest_deps.with_pytest_raises_Import.pytestpm_consider_conftes": {"doc_hash": "280de4f527811408ec54a3e11433130e310a81dbdc975866560da9cdadfe5f05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming_TestPytestPluginManagerBootstrapming.test_preparse_args.with_pytest_raises_UsageE.pytestpm_consider_prepars": {"doc_hash": "78522d53c79f11124711f134c57a8ede5313fe51ac75fa2102ca697ba8e5c08d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_unregistered_alredy_registered.assert_42_not_in_l2": {"doc_hash": "9e7c55705d0d9ec28394be9e83c053210272caf2572784c2a963d2471efa9c2f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_stepwise_on_cacheprovider_unregister_": {"doc_hash": "506d11d65c89485c6428286210593bb1adadc2ccfb19089c7ccf700fb4ddfaa5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py__coding_utf_8__None_18": {"doc_hash": "2826ec0fd455da17727796cfe996278701a784913d44e6676081b5e88753d0f8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_make_hook_recorder_test_make_hook_recorder.pytest_raises_ValueError_": {"doc_hash": "d845dbae9a347a57552f628ec540bdf00ec3921148861568e8a583240aa22bda"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_parseconfig_make_holder.return.apiclass_apimod": {"doc_hash": "5c3d08a9f8813d07b256b8f4e34ebc1e5fb99018d8ff18c544d69d2c14c8210d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_hookrecorder_basic_test_hookrecorder_basic.None_2": {"doc_hash": "1aed9aea5c3d88f227279b53d37c1fcaee0c636bb5c07980b7005286891f208b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_makepyfile_unicode_test_makepyfile_utf8.assert_u_mixed_encoding_": {"doc_hash": "7837cb0e9df560b58ba5ccb0054f75d7e5b4c254b60fc4babc36f1192c10a643"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup_TestInlineRunModulesCleanup.spy_factory.return.SysModulesSnapshotSpy": {"doc_hash": "880c791e2cdc01360bfba5c9d2d5c8a3881b168be0b5529f8b0d4c5ac641055a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot.assert_all_sys_modules_x_": {"doc_hash": "cb0cebd09ebe88247c1192c14a8f5d4a4295387c6c6443567766e846a8871462"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_sys_modules_snapshot_restore_preserving_modules_TestInlineRunModulesCleanup.test_external_test_module_imports_not_cleaned_up.assert_imported_data_4": {"doc_hash": "d90497e370866842dd295b485f9da08235e72bb48d542b278ae5f87705f06578"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_assert_outcomes_after_pytest_error_test_cwd_snapshot.None_1": {"doc_hash": "e0608463413bf5cbc2de8e73c5dd7a2eca071b5b2f2d577aea411f035fb57be6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot_TestSysModulesSnapshot.test_restore_reloaded.assert_sys_modules_ori": {"doc_hash": "725ceca8de8a1487beabacba875ce63cbadd79d1d84d6a06dd904a8fe5af38ea"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot.test_preserve_modules_TestSysModulesSnapshot.test_preserve_container.assert_sys_modules_ori": {"doc_hash": "49322f6ef45b4b19082f4698e11d4f12672b1c39774fb79786707e4128aa4414"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot_TestSysPathsSnapshot.test_restore.assert_getattr_sys_other": {"doc_hash": "fac5115530b473d4abe2f78fda890e70373395d6c54dd2bbc5d22414ed8fbe25"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot.test_preserve_container_TestSysPathsSnapshot.test_preserve_container.None_3": {"doc_hash": "e3efeda6d82357405eeee343479b5eede1739d889d79bda9f462b5d9f26848cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_testdir_subprocess_test_pytester_addopts.assert_os_environ_PYTEST": {"doc_hash": "3d90e79dd2d4c257b4d8c5b2ad4b8e53a62f9122f85980195118bd9077ab2557"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_run_stdin_test_run_stdin.assert_result_ret_0": {"doc_hash": "684cfd399d3a08689f2058ecda3857df4e8eb399d81320f9a93c64b7a5f7edd8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_stdin_pipe_test_popen_stdin_bytes.assert_proc_returncode_": {"doc_hash": "37cd6f61281cd3f765703add0a2ad689c237f338b81f644acfb8b4489be50ed8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_default_stdin_stderr_and_stdin_None_": {"doc_hash": "83bddae393545dbe60fd883d4892a904ba9cc270e59fa0667c9e060f5833db4f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_from___future___import_ab_test_recwarn_functional.reprec_assertoutcome_pass": {"doc_hash": "a4e9d7262a74d2f1ee1a86e9ee0fb66b73ace662bcc40176d104a0c6513f7519"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker_TestWarningsRecorderChecker.test_recording.with_rec_.pytest_raises_AssertionEr": {"doc_hash": "15d897227a7ec9046b9560292a3f3426daa083dcc04841a629ee13e4be8da1ff"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker.test_warn_stacklevel_TestWarningsRecorderChecker.test_invalid_enter_exit.with_WarningsRecorder_.None_1.with_rec_.with_rec_._can_t_enter_twice": {"doc_hash": "65adc0e14b4fcd86aa85017d1d405b8d9f2fd302f9c495831c09b252788576f0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall_TestDeprecatedCall.test_deprecated_explicit_call.None_1": {"doc_hash": "f93394439d66ff6ef5207ffd9f446b393216224a5c508e41978f279eb91df31e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_no_warning_TestDeprecatedCall.test_deprecated_call_no_warning.with_pytest_raises_pytest.if_mode_call_.else_.with_pytest_deprecated_ca.f_": {"doc_hash": "61d1e8f133f9740f25a46877c8bf3caf72a5ba0723eae29abad02f0df993e3ba"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_modes_TestDeprecatedCall.test_deprecated_call_modes.if_mode_call_.else_.with_pytest_deprecated_ca.assert_f_10": {"doc_hash": "4cd1f2072093acdc92f7964f2532030e3bfacc7ff72bd16abb356b339156d598"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_exception_is_raised_TestDeprecatedCall.test_deprecated_call_supports_match.with_pytest_raises_pytest.with_pytest_deprecated_ca.warnings_warn_this_is_no": {"doc_hash": "e629720111387ffc9eb1a41fc5f2e05322ceca4bf0a5b7aeafb219c250633922"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns_TestWarns.test_strings.for_w_in_warninfo_.assert_msg_startswith_wa": {"doc_hash": "cc6ddb7bcc95ee40fc1552db41f4434fa12404b02b7b9b485327c32ba2798190"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_function_TestWarns.test_warning_tuple.pytest_raises_": {"doc_hash": "aa9fe588257fb246315f12d2b092a6324d4d811f82eaff3af528b29c78a77e2c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_as_contextmanager_TestWarns.test_as_contextmanager.None_3": {"doc_hash": "ef23d4ad113a7c749cb7b2b9156b61d26dc13a429e8b51bdd1cc01feca4a9628"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_TestWarns.test_record_only.assert_str_record_1_mess": {"doc_hash": "4ba47ec5de3d0df2c18b6be7bda0af91f35e9e1a5b79d8e81d2135f0e48bdc73"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_by_subclass_TestWarns.test_record_by_subclass.None_5": {"doc_hash": "c7604df4be301ae13c00d3472ea2d150490f2c92525711baf56919a0f6fbb727"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_double_test_TestWarns.test_match_regex.None_2.with_pytest_warns_FutureW.warnings_warn_value_must": {"doc_hash": "463573e09d84b92e52099b3bf319810974ec576cf5fcad53841849bcb5d183cc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_one_from_multiple_warns_": {"doc_hash": "b115e50426e061e5ea83f11799cceeb8ff3c855e339e72829b5ce32005c79a6b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_pytest_TestReportSerialization.test_xdist_longrepr_to_str_issue_241.assert_test_b_call__to_js": {"doc_hash": "2a31a3d4d1e44570506b5489df737352bd809d69f054fc6a46166524157ba685"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130.assert_added_section_in_a": {"doc_hash": "ed682b6af8770ad5fcc4f88a68bace06a503ffd5ee0857462147911100687eb6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_170_TestReportSerialization.test_reprentries_serialization_170.for_i_in_range_len_a_entr.assert_rep_entries_i_sty": {"doc_hash": "35a5bc58f93559c70310e398cca9d97858db76a2d34ba3a89f3802cb3e216b1c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_196_TestReportSerialization.test_reprentries_serialization_196.for_i_in_range_len_a_entr.assert_rep_entries_i_lin": {"doc_hash": "0e54a983b2fddcdd717efcf8bd8cebfc27b4a319ecac458f3e666504898b7407"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_itemreport_outcomes_TestReportSerialization.test_itemreport_outcomes.for_rep_in_reports_.if_rep_failed_.assert_newrep_longreprtex": {"doc_hash": "8c3346b1794386ebb6c2ac45988d6731946ef871ed6d03b0ff43993336701fd3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_passed_TestReportSerialization.test_collectreport_passed.for_rep_in_reports_.assert_newrep_skipped_": {"doc_hash": "9bf005d4821c78f20e8bf18e7208befa9618c226ac9e5d1edc654b16f96a296f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_fail_TestReportSerialization.test_collectreport_fail.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_": {"doc_hash": "6f388eb89826190f926c4bf19ed1285f57062f2bcadf95b1b3e1abe6df5ef911"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_extended_report_deserialization_TestReportSerialization.test_extended_report_deserialization.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_": {"doc_hash": "22e4dfbb857a06590941c516d8755bc544f8f9bd5f906c59e96c1739ba08d623"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_paths_support_TestReportSerialization.test_paths_support.assert_data_path2_s": {"doc_hash": "13085e166eb300cd68e42b21aca9c4aa68258151e6ca3265f872609e321951ad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_unserialization_failure_TestReportSerialization.test_unserialization_failure.with_pytest_raises_.TestReport__from_json_dat": {"doc_hash": "ce6d8cb84df4b227f998a8c793a049859340941d265e560055f6f266a5a285d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks_TestHooks.test_test_report.for_rep_in_reports_.assert_new_rep_outcome_": {"doc_hash": "6c69c61b860be49dd15a2cfe1038aa557d56dfab5a09a6b004ec340ec59aca3e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_collect_report_TestHooks.test_collect_report.for_rep_in_reports_.assert_new_rep_outcome_": {"doc_hash": "dff450237c218a6f15f1811c6132ea3cf67aa19f37461ce842fb1a7acbc2ef38"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_invalid_report_types_": {"doc_hash": "df2a784644113901b2feedcc03fccd7bf3dc595ad85702c091b831b89bb15a55"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_from___future___import_ab_test_write_log_entry.None_14": {"doc_hash": "7e144d0acfa273afc5431f0702b9314b7f1ebff6ef9bd16f252946c4ed9bbc97"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration_TestWithFunctionIntegration.test_collection_report.assert_XXX_in_join_l": {"doc_hash": "75797239a5f2d72924ab610ee71b88d738133a01f4e2315b3be724e3df4b6063"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_log_test_outcomes_TestWithFunctionIntegration.test_log_test_outcomes.assert_len_lines_15": {"doc_hash": "6b26f64209848ceeaf1607e3e10ab98658f70d0441e4168a244921bf45c584d5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_internal_exception_TestWithFunctionIntegration.test_internal_exception.assert_ValueError_in_en": {"doc_hash": "c9348ba78708fbbedbef66f67145917c8ac01d985ce70bc7ed220d28ecb00e22"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_generic_test_generic.LineMatcher_lines_fnmatc": {"doc_hash": "3089e6258511b0ca7eca8048e65769724c39987201afa13a4bc1c9d84a90cfbb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_makedir_for_resultlog_test_no_resultlog_on_slaves.None_4": {"doc_hash": "061aca5fd3459b8d8d0223c1fd8453294dd8153b07df2a6f01721e429782f28f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_failure_issue380_": {"doc_hash": "1f7b482497bcd466f2af3c5efb580e873e9947db35d0909664ad4a385706e856"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__coding_utf_8__TestSetupState.test_setup_fails_and_failure_is_cached.None_1": {"doc_hash": "f993bf310fb4efd27c6fc026c2af2e9114c72fb608f0581e1444b035afaccf5f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_one_fails_TestSetupState.test_teardown_multiple_one_fails.assert_r_fin3_fin": {"doc_hash": "05a8a6263a519386f3bc3e19873964904998ca89f08c8c4f267b1926abc89a20"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_fail_TestSetupState.test_teardown_multiple_fail.assert_err_value_args_": {"doc_hash": "ff6f54935255bb1fe830d6a4190118c51ee2d0e5a92a2655ad562cb8c3f3069b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_scopes_one_fails_TestSetupState.test_teardown_multiple_scopes_one_fails.assert_module_teardown": {"doc_hash": "086343c40b8e3ed865f6fb17c5eed31790cbec25f8da076d155f770a82196f05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests_BaseFunctionalTests.test_failfunction._assert_isinstance_rep_l": {"doc_hash": "2aeb00b62264ebbb3bb5dda843b26dcf71aa244135952dee8ef2a71913aa0211"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skipfunction_BaseFunctionalTests.test_skipfunction._assert_not_rep_skipped_": {"doc_hash": "3bbf22b5d3a02ede58cc1195d08709833cba6165604c4c19e01c284dc58500d3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skip_in_setup_function_BaseFunctionalTests.test_skip_in_setup_function._teardown": {"doc_hash": "5a15f54d26aab8f245f9eedbabbd177219ef4627f01332f5e8c71d11f3002527"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_BaseFunctionalTests.test_failure_in_teardown_function.None_1": {"doc_hash": "574080077575391ac4e033585041ff899d6a6a9456d6eecc7652983158d57784"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_custom_failure_repr_BaseFunctionalTests.test_custom_failure_repr._assert_rep_failed_failu": {"doc_hash": "d4fed15714c3f417339791ea76eb3fae3b60ba69d13cfd5d0c39e18c2b810b1e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_teardown_final_returncode_BaseFunctionalTests.test_logstart_logfinish_hooks.for_rep_in_reps_.assert_rep_location_": {"doc_hash": "35bd5c52db70e491a9d0a717322309faeedcbcd97445a2c162acbbeea6ea1053"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue90_BaseFunctionalTests.test_exact_teardown_issue90.assert_reps_5_failed": {"doc_hash": "4cc9610d6121c536bcb24692e907df1c6befeae1827f1605acefd26848944f53"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue1206_BaseFunctionalTests.test_exact_teardown_issue1206.assert_reps_2_longrepr_r": {"doc_hash": "9abe42fb6d11a19942081a2f15eea0c68f2e02d1b366e623b22b114f8954a971"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr._assert_instanace_rep_fa": {"doc_hash": "f36063e77517249f6b2e33253a1c237d93ff9cd035dc7f05b03626a2c9432b78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_systemexit_does_not_bail_out_BaseFunctionalTests.test_exit_propagates.try_.else_.pytest_fail_did_not_rais": {"doc_hash": "86500d93b15273da3fccd9df837221dab302b564d2d163443643ddd7d4715dfb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestExecutionNonForked_TestExecutionForked.test_suicide.assert_rep_when_": {"doc_hash": "8c3d537ad0e6e35f1f870e7651e02b048d70705c91f56b531f11352e1c6f9d78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSessionReports_TestSessionReports.test_collect_result.assert_res_1_name_Te": {"doc_hash": "8ab94b68d94136d8aa8caf48ae187fbdcb4a4f1e423f34077736fcd9e151d693"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_reporttypes_test_report_extra_parameters.assert_report_newthing_": {"doc_hash": "07ee894f3137a1fd24d0efb3bc0ee0aba283a7569d67aa0cf317976f01601dd4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_callinfo_test_callinfo.assert_exc_in_repr_ci_": {"doc_hash": "30188ba0d3e311b5b1882d45ff20253c480e060e02c18aabde8bae1c4ca40fc5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__design_question_do_we__test_runtest_in_module_ordering.result_stdout_fnmatch_lin": {"doc_hash": "f51f958a3a5c68b3aef59ed1cf63de633e4b7cd431e4d9b13ac6fa38fae5dec9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_outcomeexception_exceptionattributes_test_pytest_exit_msg.result_stderr_fnmatch_lin": {"doc_hash": "d84a65568834d76e7878ff572e7a847fca8a260fc294cc21adf8f0a7a5d51160"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_exit_returncode_test_pytest_exit_returncode.assert_result_ret_98": {"doc_hash": "07368f345d5a0ab3bcf16195da6bd7dfa1754b68885afc47e72555d35794df05"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_runtest_test_pytest_fail_notrace_collection.assert_def_some_internal": {"doc_hash": "4802a50a25f7a6906878fffe2f46bc91a8c3da049b38377522d46d90bab3f9ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_non_ascii_test_pytest_fail_notrace_non_ascii.assert_def_test_hello_n": {"doc_hash": "db0825290aff1c607d6d45a398637f6c59ab4427b89a8d8064986bb035de1137"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_no_tests_collected_exit_status_test_exception_printing_skip.try_.except_pytest_skip_Except.assert_s_startswith_Skip": {"doc_hash": "60d80a58dcb7e26f084bc60572b72bd5be0521811ea996a353bb6172a86a6b91"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_test_importorskip.try_.except_pytest_skip_Except.pytest_fail_spurious_ski": {"doc_hash": "0c101cabe99876faf4bb73dfdcc21aa3f85181e78a0b494f2a520d6049165ead"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_imports_last_module_part_test_importorskip_dev_module.try_.except_pytest_skip_Except.pytest_fail_spurious_ski": {"doc_hash": "dee9dc24b249b099ca2231010bc799d73ed42d5b3118a9b1e0ebafc64f01c0f5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_module_level_test_pytest_cmdline_main.assert_ret_0": {"doc_hash": "a4f87fea60a92d48c9fa9f414d8c49782df90436455ab9a75a1acf2327a4928c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_unicode_in_longrepr_test_unicode_in_longrepr.assert_UnicodeEncodeErro": {"doc_hash": "3e7c3d2dfa772e7ec8ae1e3ab2d3d89dc546d82e3fc137bd9aeb94aee2af742e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_failure_in_setup_test_makereport_getsource.result_stdout_fnmatch_lin": {"doc_hash": "03f1c6ddf90465da5ce5c6fb3aecd8ea51b633f6a038d4b182e5828f51d9b2e6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_makereport_getsource_dynamic_code_test_makereport_getsource_dynamic_code.result_stdout_fnmatch_lin": {"doc_hash": "f73f8f6a6d5595ccbd43b54d6569887801fcc114ecb69f08cd84ad2d595d4369"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_store_except_info_on_error_test_store_except_info_on_error.None_5": {"doc_hash": "7dab1bc00094bcd1bf41238b48527279f90c8c423ea12cd406d4e6425be9da61"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_current_test_env_var_test_current_test_env_var.assert_PYTEST_CURRENT_TE": {"doc_hash": "c4b77305c3b80f9724e3e14163d071fe116a095b5a9c2b042d575d1b04e650f6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents_TestReportContents.test_longreprtext_failure.assert_assert_1_4_in": {"doc_hash": "65a0b20c02d6bba69eacba72fadc536d73453bae22986b222fddec7ef9c4928e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents.test_captured_text_": {"doc_hash": "a1d8729f49d601536a54e7f96612c14826e5da6bdefe79f22349c91ea56ebc18"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py___test_module_and_function_setup.None_1": {"doc_hash": "6dd8ae716c2de6f6d399fc6089b38a5a228924c5751413479e7c200b859e4456"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_module_setup_failure_no_teardown_test_setup_function_failure_no_teardown.assert_calls_0_item_modu": {"doc_hash": "ed20a7516b3e7ade705108a92e18cefadd2c35a4dd5c551f93af656f6a12d996"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_test_class_setup.reprec_assertoutcome_pass": {"doc_hash": "d1298884fcc43a26c77afc1bdc87ea016371fffc1d4e3dcd5d5c166834df61f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_failure_no_teardown_test_class_setup_failure_no_teardown.reprec_assertoutcome_fail": {"doc_hash": "c6db1fc79fc3fc4fa11c5201790d2747b65ec131e7a9f0ab29f24ba575992279"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_test_method_setup.reprec_assertoutcome_pass": {"doc_hash": "773636b974f91e122d7da560a372083b8d9e77af1fa55c415d505e329266c2cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_failure_no_teardown_test_method_setup_failure_no_teardown.reprec_assertoutcome_fail": {"doc_hash": "85c11ab0e8345b48d730ac3c80adb9e671e4beb3a240e59c722444a57a07f2da"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_uses_fresh_instances_test_setup_fails_again_on_all_tests.reprec_assertoutcome_fail": {"doc_hash": "e677f4a7f4db3905d579c57c3d60f50d0f1b69a7a6f4d9f5c47f0ebf76590647"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_funcarg_setup_when_outer_scope_fails_test_setup_funcarg_setup_when_outer_scope_fails.assert_xyz43_not_in_res": {"doc_hash": "b5fcc1a2e50f65215645313dafb0250ca1cc581c4eda0be6bb66f816dfa3b910"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_teardown_function_level_with_optional_argument_": {"doc_hash": "e9e1e2da0a53d4db647c966bf845bc372b11910d5532debf1eb7d8a79c985ad0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_from___future___import_ab_SessionTests.test_basic_testitem_events._assert_colreports_1_re": {"doc_hash": "516549f836da54f4b0b4fc9c8c43c494737d78f0e0e1d48b2be1ad664d4ff822"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_nested_import_error_SessionTests.test_nested_import_error.assert_out_find_does_not": {"doc_hash": "9dfe7d5a25f9c0588d41475b2d97df310c9b90d544c88cec42bd1bba26f8fed5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_raises_output_SessionTests.test_maxfail.assert_passed_skipped_": {"doc_hash": "f6480ac33763e05b40f8ad9516f2b4ed6544bf70bca3b48ef3b36d59836cf1d2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_SessionTests.test_broken_repr.assert_": {"doc_hash": "5738968db6046a3d11861412fa199a8049205dc34fd1482c154c6aa4507be9d0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_with_showlocals_verbose_SessionTests.test_broken_repr_with_showlocals_verbose.assert_repr_locals_lines_": {"doc_hash": "2300333160ce3c3ce315a6a64bb13fde6a18bdaf6f69e3ea328c3aa924cff9f9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_skip_file_by_conftest_SessionTests.test_skip_file_by_conftest.assert_reports_0_skipped": {"doc_hash": "11f1e7e0ae21272a3e007c59a1348c58560191ac9ad4aff47ca8e5965003a836"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession_TestNewSession.test_order_of_execution.assert_passed_7": {"doc_hash": "89c589ef2a78cbf10fa407429deaf1184e3316bda7cbdbf288ff2eb9a94fc01b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_collect_only_with_various_situations_TestNewSession.test_collect_only_with_various_situations.assert_len_colfail_1": {"doc_hash": "5ebdd899a0ee3b858aa9c281decec576c045bc8b433b9d6e173e0825d46e82aa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_minus_x_import_error_TestNewSession.test_minus_x_overridden_by_maxfail.assert_len_colfail_2": {"doc_hash": "af1d045ed872371059606baaea6477eae10b8c2b23ccbbdc92c7e905708a8352"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_plugin_specify_test_exclude.result_stdout_fnmatch_lin": {"doc_hash": "52452026a75d92c73a539e4b6e34edb911330b2a73615209195935515b260bc1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_exclude_glob_test_exclude_glob.result_stdout_fnmatch_lin": {"doc_hash": "898f5ffe267aedf0704214d658c03f3e5a267e73b135e702ee45bd08ec828fd6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_deselect_test_sessionfinish_with_start.assert_res_ret_EXIT_NO": {"doc_hash": 
"aff9483785f3211af90c50cf1016a5e62d0de30e0fa1d4538ce564a1d41cf7c0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_rootdir_option_arg_": {"doc_hash": "f7b0a1c3cbebaddffc4ba3d729dfcf01859806f2e92ba86562412e77728a37e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_unicode_TestEvaluator.test_marked_one_arg_unicode.assert_expl_condition": {"doc_hash": "5d758223b5d9a8f607df8c2b42fb827ead8e53046212ac2c17d4e76d6084b21f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_with_reason_TestEvaluator.test_marked_one_arg_with_reason.assert_ev_get_attr_": {"doc_hash": "394b20b5c069571913f45a12afdafe9cb779fb02278ba155eb7888cc23cc7491"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice_TestEvaluator.test_marked_one_arg_twice.for_i_in_range_0_2_.assert_expl_condition": {"doc_hash": "d99a31c6a11c675a259c06bc12904b6077ac04c90aa9aa7e7c8935bf73d57956"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice2_TestEvaluator.test_marked_one_arg_twice2.assert_expl_condition": {"doc_hash": "fb79afc4a2595c4831f99a6d05dd83cdb11d21e360790b79f3ea9adefa4c9f08"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_skip_with_not_string_TestEvaluator.test_skipif_class.assert_expl_condition": {"doc_hash": "da4be841949b233bee1fd195e2705a1b0f3056aca3b247732d74b97c12c530a2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail_TestXFail.test_xfail_simple.assert_callreport_wasxfai": {"doc_hash": "d5757eb5af2c471a2a5a55c4c30640f3dd527a23d2544d416b439148d9d5f509"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_TestXFail.test_xfail_using_platform.assert_callreport_wasxfai": {"doc_hash": "76e5720ea12d01d7ac3e33afe1462f09443af83e812774a8ed0688a88442ee24"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_strict_TestXFail.test_xfail_xpassed_strict.assert_not_hasattr_callre": {"doc_hash": "409ca583dd8a48564414af7daadcfacf2249f896a5af4cff57d29f3e16353540"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_run_anyway_TestXFail.test_xfail_not_report_default._": {"doc_hash": "5cd5661bdc45758cb7a2816f20fd90ae8df8cb4a34289d0943e97faa87202784"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_xfail_reporting_TestXFail.test_xfail_not_run_xfail_reporting.result_stdout_fnmatch_lin": {"doc_hash": "5f7d9d6cc7abadb830046d32127dcbb964f9f61b9c43056b46c19f5ae1a6eff2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_no_setup_run_TestXFail.test_xfail_xpass.assert_result_ret_0": {"doc_hash": "e9f044ab455ae96d045df40443ae84e5b6ee84bf9a4e9c4e9e024aee2cdd21de"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_TestXFail.test_xfail_imperative.None_2": {"doc_hash": "32af431ee70f00271e87380c60d4cccc25c8b10383f50281d2a160ca78a6744a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_in_setup_function_TestXFail.test_xfail_imperative_in_setup_function.None_2": {"doc_hash": "75be090e4ae5080958d8f54ec8c66215f05fdc7472c916b4033ccbe6291bba51"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.xtest_dynamic_xfail_set_during_setup_TestXFail.test_dynamic_xfail_set_during_funcarg_setup.result_stdout_fnmatch_lin": {"doc_hash": "057d10e960eff6ed54f280889ddc9163adeccae03f420f7e586367a124b29c44"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_raises_TestXFail.test_xfail_raises.result_stdout_fnmatch_lin": {"doc_hash": "b39fc70babe5856cf237d5edf6592b278e120c0d9e9c6b8616f26d727442d0e1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_sanity_TestXFail.test_strict_sanity.assert_result_ret_0": {"doc_hash": "17d1fab2ab3b8b019eccb59dca09197a31cb58735a0d30256fc4fee558bf5c78"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_TestXFail.test_strict_xfail.assert_testdir_tmpdir_joi": {"doc_hash": "7e02e3e3b71ee5a7c5cea82bf93fe8b8f04e6eccb7a7506f1307132ea5d5a972"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_condition_TestXFail.test_strict_xfail_condition.assert_result_ret_0": {"doc_hash": "5de0257fe23576c26f4b55b31821d1113efce49d44956365b091e3ee3d2fb78e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_condition_keyword_TestXFail.test_xfail_condition_keyword.assert_result_ret_0": {"doc_hash": "bcdc96a218f2c372d858db6c4ffa1595768b9c8e5393ed31df7e07bd2707dc18"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_default_from_file_TestXFail.test_strict_xfail_default_from_file.assert_result_ret_1_i": {"doc_hash": "79dfb5916f370df630c6f487ba06642a233d75fb7b930cefa522726ad097016a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFailwithSetupTeardown_TestXFailwithSetupTeardown.test_failing_teardown_issue9.result_stdout_fnmatch_lin": {"doc_hash": "9bee9852fd9c17b560b8c47a65f44c7e165e901399441e3abfa5d5f1ed2a84ef"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip_TestSkip.test_skip_with_reason.result_stdout_fnmatch_lin": {"doc_hash": "3ddaecdd3943915063dad9b63d171ccc5682882a24eec5a7641c651c6e850bcb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip.test_only_skips_marked_test_TestSkip.test_strict_and_skip.result_stdout_fnmatch_lin": {"doc_hash": "dff1354427ae7e70e781a2fb732ab93bb8a2cd2cf14febacdcbc5cda293b9033"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif_TestSkipif.test_skipif_reporting.assert_result_ret_0": {"doc_hash": "8c281923457d56da61c2b785844cbc3e82019da360acd8be2bf3b767e9bbe99f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif.test_skipif_using_platform_TestSkipif.test_skipif_reporting_multiple.assert_result_ret_0": {"doc_hash": "b5a585223355b424c68224d17c31dedab9d3c91e858d40a812f2464bea9dfe42"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skip_not_report_default_test_skipif_class.result_stdout_fnmatch_lin": {"doc_hash": "e555125ef21121a1c18955f662e1165c1d7c1755e24a8798d655b84e0da9c752"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_reasons_functional_test_skipped_reasons_functional.assert_result_ret_0": {"doc_hash": "c71db6539e39b3a55e8cce776b08a490e078441b9250beb69b6651f463b336a4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_folding_test_skipped_folding.assert_result_ret_0": {"doc_hash": 
"febba46b76c3734c26061db21c94002396d0f4e5ac6e4d60851e79a05401f883"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_test_reportchars_error.result_stdout_fnmatch_lin": {"doc_hash": "b554161fdede1f57903cc65d2991e6cd3fe2be2114a24c565ca7745dd0641e14"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_all_test_reportchars_all_error.result_stdout_fnmatch_lin": {"doc_hash": "93557cc4b067d5884d8e9c9b44c9fe2ecf8e5c066ef40f7b392f45996d540910"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_errors_in_xfail_skip_expressions_test_errors_in_xfail_skip_expressions.result_stdout_fnmatch_lin": {"doc_hash": "bfd08f09f1f717feb71e6d3c82d678b026109d19a5dc005ba22d926afa3d64b9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_skipif_with_globals_test_xfail_skipif_with_globals.result_stdout_fnmatch_lin": {"doc_hash": "8d1c7af54263bb649de8d3707817c62d6d1a194abfa9ca882a551ad567c0d4bb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_direct_gives_error_test_default_markers.result_stdout_fnmatch_lin": {"doc_hash": "48425cda28f71b16081b23df9a147322b8fce8c4305a67e4b4b3d179ce6e093e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_test_setup_exception_test_xfail_test_setup_exception.assert_xpassed_not_in_r": {"doc_hash": "dd2fa6024f94bcfd8315f435f0c651b65859be81df7c1ea5b43a5aabbfee219e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_imperativeskip_on_xfail_test_test_imperativeskip_on_xfail_test.result_stdout_fnmatch_lin": {"doc_hash": "6f8a0ebd99653347f42fd1b8652c1e1bc4ce1c8f20b3dd52735d12ea0d388aa1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestBooleanCondition_TestBooleanCondition.test_xfail.result_stdout_fnmatch_lin": {"doc_hash": "29a3cbc941d0ef17e9fbaf60f7841bbe0bdf2abb063a430455e24a8889693ead"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_item_test_xfail_item.assert_xfailed": {"doc_hash": "d17f28931b8956dccacf1d4a160041f290da06622513fbcccabfc916d0744ba8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_module_level_skip_error_test_invalid_skip_keyword_parameter.result_stdout_fnmatch_lin": {"doc_hash": "d172acb3b8734ed5eede2d7b4b69802d9f59bfdd38890f31ba08e4c510d3db1e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_mark_xfail_item_test_mark_xfail_item.assert_xfailed": {"doc_hash": "12379303c064105926bf058fc873ea5f4b6e3bca66866f9bb42ed3871ec374a9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_summary_list_after_errors_": {"doc_hash": "1d17fc3aec133167c7a1c05ad40c4bcf3308de27a7fa6bd414b988495e89ec35"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_pytest_stepwise_testdir.return.testdir": {"doc_hash": "a32394decf6d76352674fb8ff2de292cae1b264f63e9b36dbb98261b19e6e9dc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_error_testdir_test_run_without_stepwise.None_2": {"doc_hash": "d16d99eaf48db7198d24763a276075fee5f4af20f2ded7f3b85c18f9bc06ef70"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_fail_and_continue_with_stepwise_test_fail_and_continue_with_stepwise.None_7": {"doc_hash": "a31192f6eb2e9c33ac6bba8d92eeab5ad245528fec8d867d56051dd5de105b71"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_run_with_skip_option_test_fail_on_errors.assert_test_success_afte": {"doc_hash": 
"9e93be3d3b37b7c3a25ae9d314a2e1002aba572dea8121213723359915aa9d26"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_change_testfile_": {"doc_hash": "354a651647cbfdd0c3cfaa471a82ff45604759d4b13454c04264af79e8bef0bb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_plugin_nameversion_test_plugin_nameversion.assert_result_expected": {"doc_hash": "48938370b4346b12da52733a111cfa3acb9dc8e9513bfe5e2262c2f0ebc7014e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal_TestTerminal.test_pass_skip_fail.result_stdout_fnmatch_lin": {"doc_hash": "6b6e56d491b15b90c1a947ae6aaaa99cf50b4d749292fd4ae6520db2f7ff4002"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_internalerror_TestTerminal.test_writeline.assert_lines_2_hello": {"doc_hash": "2094028254c745975f44fe974510595953425c435d37cfa0d07a6639299a38cf"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_show_runtest_logstart_TestTerminal.test_runtest_location_shown_before_test_starts.child_kill_15_": {"doc_hash": "11a82e84aa0d6271447840f17e8ebf96411c06e85400e3f8954d2c491dba3898"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_report_collect_after_half_a_second_TestTerminal.test_report_collect_after_half_a_second.assert_2_passed_in_in_r": {"doc_hash": "7fa907134d52b43344d5994fb4e1b644c9ca82c6a68ae43e0f88cff50a21b2ed"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_subclasses_show_subclassed_file_TestTerminal.test_itemreport_subclasses_show_subclassed_file.None_2": {"doc_hash": "b1b1feac2e402f89126368ccb37afc0a48196546ed136e959798f631f38cb677"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses.assert_not_in_resu": {"doc_hash": "0587f9b1b58e64d5805e6a15b142f3d487f70ca231996ebdce3e76b19923628e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_interrupt_TestTerminal.test_keyboard_interrupt.None_2": {"doc_hash": "69e5568e93d29f66c2f7faded603eb9eaef8d66e452d436d96c9e331e7e398e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_in_sessionstart_TestTerminal.test_rewrite.assert_f_getvalue_h": {"doc_hash": "eba3e0721684fb137013ecd69fa3e2abc10087a486708022e17f4948ad5f8050"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly_TestCollectonly.test_collectonly_fatal.assert_result_ret_3": {"doc_hash": "2eb39f18fb8edcc91bc64522bcc1e9abf98081afd70f4e71f0f57c12f64ccc65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_simple_TestCollectonly.test_collectonly_simple.result_stdout_fnmatch_lin": {"doc_hash": "ed51c821e822d2ef2af773f104a9c0537b852a29d7788abdaed8423d42d87853"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_error_TestCollectonly.test_collectonly_more_quiet.result_stdout_fnmatch_lin": {"doc_hash": "487f570c0e64c758bbac25916dc36894294c4b6c9f5bb130bd0f727dba8c5f03"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting_TestFixtureReporting.test_setup_fixture_error.assert_result_ret_0": {"doc_hash": "219b59819c6dd6c2f29e2824404c6937160a8811d8530c7433c2ed361c2c6272"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_TestFixtureReporting.test_teardown_fixture_error.result_stdout_fnmatch_lin": {"doc_hash": "8830e4f34717615f5c6728f45a09e388fcebf3b0b4136790d1c5a7f03b03d3e4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_and_test_failure_TestFixtureReporting.test_teardown_fixture_error_and_test_failure.result_stdout_fnmatch_lin": {"doc_hash": "efaaec6631fb081b948d56203354c007ec95496a06d261da5948b2e2a982ab6b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_setup_teardown_output_and_test_failure_TestFixtureReporting.test_setup_teardown_output_and_test_failure.result_stdout_fnmatch_lin": {"doc_hash": "a665fd0f7bcee4859c7b2569148c2b7dd8a888e75986f7728be0b06995ec7d1e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional_TestTerminalFunctional.test_deselected.assert_result_ret_0": {"doc_hash": "2699e311226ea1a0537c8c818a60abce1ca08a19c04f3c5c72cfe6790027996d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_deselected_with_hookwrapper_TestTerminalFunctional.test_deselected_with_hookwrapper.assert_result_ret_0": {"doc_hash": "fe09be24d9fcde13c63576e56d1d08298e1cf0fd95cb4c554a5990bca21e7ba8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution.assert_result_ret_0": {"doc_hash": "6710005c25cdada10f5200737069a6decedf959194c74f2534051022171b5242"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_no_skip_summary_if_failure_TestTerminalFunctional.test_passes.assert_result_ret_0": {"doc_hash": "647e336f3f827b7c9b87d0a79ea8ac4abd97a0e712355f5e6cb5d46aa3462845"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_trailer_info_TestTerminalFunctional.test_header_trailer_info.if_request_config_pluginm.result_stdout_fnmatch_lin": {"doc_hash": "14bec523d67fc7da122a7fdf332a55c1052fc12bf9a928b103a9902d0c519db9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_TestTerminalFunctional.test_header.None_7": {"doc_hash": "5f926eca3f1e5bb6a46e4b54fd7afda9e0bcc4243a542cb2239b696119a9a56e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_showlocals_TestTerminalFunctional.verbose_testfile.return.testdir_makepyfile_": {"doc_hash": "581de1336b8b803739f58cd4ffce02f14a3e30de1f725c0b510f655f1e6c8401"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_TestTerminalFunctional.test_verbose_reporting.assert_result_ret_1": {"doc_hash": "f33c76fae29d5e36919c5d507e472d1e7c00a9d0aef87e0a069855dcf0e0cd55"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_xdist_TestTerminalFunctional.test_verbose_reporting_xdist.assert_result_ret_1": {"doc_hash": "898da6e7a17ba363441e982f359eee2f938368fa43e5023f028c18d1303715d9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_quiet_reporting_TestTerminalFunctional.test_more_quiet_reporting.assert_passed_not_in_s": {"doc_hash": 
"43cac7e73793b527c923c250159ed10e37a57529a3145af721c67a9e854f59e2"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_report_collectionfinish_hook_TestTerminalFunctional.test_report_collectionfinish_hook.result_stdout_fnmatch_lin": {"doc_hash": "103d4cba69047806d65c4d95536f096faf43c5b86822c836194d8d610106429b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_pass_output_reporting_test_pass_output_reporting.result_stdout_fnmatch_lin": {"doc_hash": "b5e466062c9ad24e472a185b33eb3731ec1b90192833300a835e4c56934a4e2f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_test_color_no.assert_x1b_1m_not_in_r": {"doc_hash": "aca9f4dfc8ba9cd4e5e31a772c9bbe8d03b5f2c7a140f6ec22121e26fd58fb71"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_collection_on_non_atty_test_color_yes_collection_on_non_atty.assert_collected_10_item": {"doc_hash": "eea01c444a62dd338c1b9abb7c072ee7e7d5d5ab1c1f3e74f1427509826c8973"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_getreportopt_test_getreportopt.None_6": {"doc_hash": "f4d35c35460df71e357916e985b5161048d2be939f811ace3436358395a09869"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminalreporter_reportopt_addopts_test_terminalreporter_reportopt_addopts.result_stdout_fnmatch_lin": {"doc_hash": "054443133501b6b89a150d9a42dbb8374f08e3696a60594b32cac94aa67c6a96"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_short_test_traceconfig.assert_result_ret_EXIT": {"doc_hash": "78a56e7354bf902f8b606ac5871a71e96d3734aab8209c30b8e1743eb967b847"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting_TestGenericReporting.test_maxfailures.result_stdout_fnmatch_lin": {"doc_hash": "37db2dd59e1e9f52005f3bb51421e2fd69dd07533d4791ae3f94d99bc86ceed3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_option_TestGenericReporting.test_tb_option.for_tbopt_in_long_sh.if_tbopt_no_.else_.assert_IndexError_not_i": {"doc_hash": "871c97e7c23331a5fd03780a488a982a6d59687cb3fc6428f4f395c269feaeca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_crashline_TestGenericReporting.test_tb_crashline.assert_def_test_func2_n": {"doc_hash": "ff61ec69fd7e715aa6ed2cff2ea617e334a91d236b75f2fb7c6426af11c7218e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_pytest_report_header_TestGenericReporting.test_pytest_report_header.result_stdout_fnmatch_lin": {"doc_hash": "765f616dbf6043d345418338cee2e11a43beb4b4795821d0bbdce718bf2d3911"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_TestGenericReporting.test_show_capture.None_11": {"doc_hash": "13c9725ec2cdb90fe6026d0ab2901c0f30ff319f1ad349b752250762c8eeaedd"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_with_teardown_logs_TestGenericReporting.test_show_capture_with_teardown_logs.None_11": {"doc_hash": "1550a0cb29b63ab7be506beb024b44a5a24113858ba4d6ddc5eea210368ee5a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fdopen_kept_alive_issue124_test_fdopen_kept_alive_issue124.result_stdout_fnmatch_lin": {"doc_hash": "6b9aa8b5eea645603f89434a04f21d1a008bb16f392453b61001b34a20048497"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_native_setup_error_test_terminal_summary.result_stdout_fnmatch_lin": {"doc_hash": "4151788a50540e6cfb65dcb839109cee36e4abd451b31006284e25dde91d58e7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_are_displayed_test_terminal_summary_warnings_are_displayed.assert_stdout_count_": {"doc_hash": "36ee92e5eea9388d6e6be31e39ae9093d9746fc0c2ee33a0e7698c8b31eeb32d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_header_once_test_terminal_summary_warnings_header_once.assert_stdout_count_": {"doc_hash": "caebc6802789d5586c0ff49348b42edfe4388d0fe9454b4dd2aa7a5685655da0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_summary_stats_test_summary_stats.assert_color_exp_color": {"doc_hash": "6c63022501d2cd8fcb1e083d6815f2221ac6a7668aaf109c6c42319fa5fbeacc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_counting_towards_summary_TestClassicOutputStyle.test_normal_verbosity.result_stdout_fnmatch_lin": {"doc_hash": "103b94e9a1fe54548bed1fd542f7179d57370c18ed209d278eb467c62aa8a36c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestClassicOutputStyle.test_verbose_TestClassicOutputStyle.test_quiet.result_stdout_fnmatch_lin": {"doc_hash": "0bea93b29c3c1e65d532f98159863c5373696d8942532c9d766598a41e736647"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle_TestProgressOutputStyle.many_tests_files.testdir_makepyfile_": {"doc_hash": "8c8bd8151f529feca4cf389058b0c7ba7e6473198b298bbb1e69afa4f73fc97f"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_zero_tests_collected_TestProgressOutputStyle.test_zero_tests_collected.output_stdout_fnmatch_lin": {"doc_hash": "baf8b51fe300ce4d583485fb324b35581fdb1684db3fd152c0606fae7bd4c001"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_normal_TestProgressOutputStyle.test_count.output_stdout_re_match_li": {"doc_hash": "489fe95a78375dceeb244f9af6685f0f2902c282528c78cf73bba64c4abfbbe8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_TestProgressOutputStyle.test_verbose.output_stdout_re_match_li": {"doc_hash": "93a2cce7957e4e0fdd84c9b55d61e5afb2a861a5ae02b324c485cee302baee28"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_count_TestProgressOutputStyle.test_verbose_count.output_stdout_re_match_li": {"doc_hash": "ec9e4964b46485642e55e434c7e011855f979d12a8527d63d824ba59777e965b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_normal_TestProgressOutputStyle.test_xdist_normal_count.output_stdout_re_match_li": {"doc_hash": "4919631a074eba434a683eef3216cf19c39d24d02cab0f9638283a28596517b7"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_verbose_TestProgressOutputStyle.test_capture_no.assert_not_in_output": {"doc_hash": "e3850c52f121d6afbfdc8a18303f1b6f483dcdf634b92c2ff7973d713056ccc0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown_TestProgressWithTeardown.many_files.testdir_makepyfile_": {"doc_hash": "464145e336c1784cbf534b7584a595c20b4cde647b02274afdc25c9c282a422f"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_simple_TestProgressWithTeardown.test_teardown_many.output_stdout_re_match_li": {"doc_hash": "97d9d48ee582a03f235542a620b0c66c3e0664e0f2e05f67ce63f2b7aa7c9db0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_many_verbose_TestProgressWithTeardown.test_xdist_normal.output_stdout_re_match_li": {"doc_hash": "8eda0afdf30e6095c910a44e9351b4e34bf5ac46b334071873fe39c531537c3c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_from___future___import_ab_FakeConfig.option.return.self": {"doc_hash": "a5ebe529b1558eb1a6458121e65a193862a72574159e1555c5a033be33b0ad8b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestTempdirHandler_TestTempdirHandler.test_tmppath_relative_basetemp_absolute.assert_t_getbasetemp_re": {"doc_hash": "4c24dca83794135f9f5407f0e50b8f50c1d17b269d23e2d872ee4d818ada0c4e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestConfigTmpdir_test_basetemp.assert_mytemp_join_hello": {"doc_hash": "b1dee10548d024bc223f62b134c3fd018db75f2b47ddca9ff41134343e971703"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_always_is_realpath_test_tmpdir_always_is_realpath.assert_not_result_ret": {"doc_hash": "7dde5246239ebd2d4212a0c8fb271cfb45c5827223e2dd8e0b105976613601b8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmp_path_always_is_realpath_test_tmp_path_always_is_realpath.reprec_assertoutcome_pass": {"doc_hash": "3f6ff54cbc72d5a0caeb2d3add5e37b188e46d66e566ffe5be69fe8fa4bb8360"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_too_long_on_parametrization_test_tmpdir_factory.reprec_assertoutcome_pass": {"doc_hash": "320492f05417e594ef169cd51538144c635584d7912777651893c7fae13e5bf9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_tox_env_break_getuser.for_envvar_in_LOGNAME_.monkeypatch_delenv_envvar": {"doc_hash": "5f0f8f2102a5a6bb63502527aa23f4b4412bf664879747e4cd42b7f7672315ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_uid_not_found_test_tmpdir_fallback_uid_not_found.reprec_assertoutcome_pass": {"doc_hash": "3123e91f22ab87041647eaea58e300a234396c0c63361b3a6acf0394320220a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_get_user_uid_not_found_test_get_user.assert_get_user_is_None": {"doc_hash": "753af62023ec559b2c6ed1c7233ad175c5d9181447cdf5f609caf1662beeb50a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestNumberedDir_TestNumberedDir.test_removal_accepts_lock.assert_folder_is_dir_": {"doc_hash": "efeb02a37095d6162f0fcb15452c6023d698ffe201b84df6a76e309ff99e9a37"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_attempt_symlink_to_": {"doc_hash": "f0a35b839dff70463d9b3e60f8a3162a28282777e73102865c9b2cd9418320ee"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_from___future___import_ab_test_simple_unittest.None_1": {"doc_hash": "a4e4fdf350062ff0847481d15af422e0a1ccb9d712c0a29df3042fe634ee566a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_runTest_method_test_isclasscheck_issue53.assert_result_ret_EXIT": {"doc_hash": "4b9621099fc2449fd5dead2c078e1065412c179a5e848b60aba6800336e63750"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_test_setup.assert_rep_failed_and_42": {"doc_hash": 
"f2933a509b19aaf1ed7ff3c64d22c6233387426f730fc564e0a76504d50802d0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setUpModule_test_setUpModule_failing_no_teardown.assert_not_call_item_modu": {"doc_hash": "865b3b63267048420dc8d9d800fb557a07f4e54e70a3712febe1372e5022ab52"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_new_instances_test_teardown.assert_passed_skipped_": {"doc_hash": "86ca36289a7f360c4e80e0b28b05e613322d76e7d93cedeae7f3710836f4b80a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_teardown_issue1649_test_teardown_issue1649.for_obj_in_gc_get_objects.assert_type_obj___name__": {"doc_hash": "7f88554f7acc455c71543a019e637d7c821f0d4966ebb175eec8ffe287e761c4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue148_test_method_and_teardown_failing_reporting.result_stdout_fnmatch_lin": {"doc_hash": "69dca904788136d204398c8fbc12e845639745fb973a3685bb412d3cad27edad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_failure_is_shown_test_setup_failure_is_shown.assert_never42_not_in_r": {"doc_hash": "ed7ecee0585419b73043758a725ffac7c552168e0c4ecacc975c0506b430f0fc"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_setUpClass_test_setup_setUpClass.reprec_assertoutcome_pass": {"doc_hash": "7fcf23ef7ed6ece48ada10f5b78d5ffcc205cdb1a10c5d911ef52f7563abb7d6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_class_test_setup_class.reprec_assertoutcome_pass": {"doc_hash": "e4518fd97afe81e387d03186811fb0912c4266f4241488d3898f82f126837b15"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_adderrorandfailure_defers_test_testcase_adderrorandfailure_defers.assert_should_not_raise_": {"doc_hash": "7baa241c5200f5cd8513f4a4653adb993bea9d19597aff742294b919a48f7a17"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_custom_exception_info_test_testcase_custom_exception_info.result_stdout_fnmatch_lin": {"doc_hash": "ef626594f43d1b801f1779de11d095dfa7573032d67132f1d26c4d526a5c4b1b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_totally_incompatible_exception_info_test_module_level_pytestmark.reprec_assertoutcome_skip": {"doc_hash": "b7b9212059207bea396a06294ed63dd56f11d78ab9e7f7195782279c647ef0f4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest_TestTrialUnittest.test_trial_testcase_runtest_not_collected.None_3": {"doc_hash": "4be2eab03902efc099bf87fb0d29acc079eae6acf100dd059f89a60f86b28452"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_exceptions_with_skips_TestTrialUnittest.test_trial_exceptions_with_skips.assert_result_ret_1_i": {"doc_hash": "18732eda9e95535804319e628f92dbe08424975cd1c4ede89f4810daf1e72918"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_error_TestTrialUnittest.test_trial_error.result_stdout_fnmatch_lin": {"doc_hash": "84dca758730794d9952eb5f46f068464eed23ba7266c202ab8f373b28378436b"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_pdb_TestTrialUnittest.test_trial_testfunction_todo_property.reprec_assertoutcome_skip": {"doc_hash": "1ea11eb6033bba35ef7bf0176b7a07923cb8ad76f99be09f69f3e20d35889c08"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_djangolike_testcase_test_djangolike_testcase.result_stdout_fnmatch_lin": {"doc_hash": "0c45e617cc229fefbc49f12192656330f2982bed26e4eee21c8003cc6b7242e3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_not_shown_in_traceback_test_unittest_typerror_traceback.assert_result_ret_1": {"doc_hash": "beeda08e800f6292b5d51ec95ae2193f63c9814c76278c5f26e74241e8c18bbe"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_failing_test_is_xfail_test_unittest_expected_failure_for_failing_test_is_xfail.assert_result_ret_0": {"doc_hash": "fd7f4f6dd1ac8a21d9ec989dc1dc5fc2d5502073100d6e0d16da3fd39db85f65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_passing_test_is_fail_test_unittest_expected_failure_for_passing_test_is_fail.assert_result_ret_1_i": {"doc_hash": "1034533cd4b624098e0b4a209257e2a1932ae7866544ad716772f691d7d9ca17"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_setup_interaction_test_unittest_setup_interaction.result_stdout_fnmatch_lin": {"doc_hash": "7364499b3e8c4995a79b60daf7f1b7e1a8d3ae529cdc275771c0feff69df3547"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_non_unittest_no_setupclass_support_test_non_unittest_no_setupclass_support.reprec_assertoutcome_pass": {"doc_hash": "3094ec98ef828635c5c3fd8e0120697e55a5d4f46658feae89fb04d803b273d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_no_teardown_if_setupclass_failed_test_no_teardown_if_setupclass_failed.reprec_assertoutcome_pass": {"doc_hash": "90a887e70755c3416bbd70270ac82366aaea313211043b2c7ffdc1e6ee8b1979"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_issue333_result_clearing_test_issue333_result_clearing.reprec_assertoutcome_fail": {"doc_hash": "92ef271886e12fa81831858ee6fda3da060e8decf216bf4f7e7f775a1ebe7303"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_raise_skip_issue748_test_unittest_raise_skip_issue748.result_stdout_fnmatch_lin": {"doc_hash": "d85fba8cbd9acc9e3c5b2748cf65a29ec2922faec35f65a6b363a3a549a3ed60"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue1169_test_class_method_containing_test_issue1558.reprec_assertoutcome_pass": {"doc_hash": "8bbf7050e41e4bb17a459e3e2d0095878d1c2c3161ce8f3f921266f04564208a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_usefixtures_marker_on_unittest_test_usefixtures_marker_on_unittest.result_assert_outcomes_pa": {"doc_hash": "404755fdee0df63c28036ad33e214c37bb44d4de995361e1dcb2879426ada906"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_handles_init_exceptions_test_testcase_handles_init_exceptions.assert_ERROR_at_teardown": {"doc_hash": "4c5527e0de2bdffac088aa76adfed97d0cb5a47163d752cc9a1034ae00e211d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_error_message_with_parametrized_fixtures_": {"doc_hash": "03c7c24c6687431f56342e34d0f0ce4ddedd6d3b62dbc70d450c66bb1f2468ad"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py__coding_utf8__pyfile_with_warnings.testdir_makepyfile_": {"doc_hash": "81d237dc953d1d3aeef4144d201ce8fb8d49f90dfbef06a169f5c3289f7570a8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_normal_flow_test_normal_flow.result_stdout_fnmatch_lin": {"doc_hash": 
"07b3ec4053bf08e4cfb580365fb927dd65507324e33a888d54f7b2424f9fbbab"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_setup_teardown_warnings_test_setup_teardown_warnings.result_stdout_fnmatch_lin": {"doc_hash": "590017430077316016d637b81c2239c09376322ad6ba5b9b8bc8c69ec9a5188d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_as_errors_test_as_errors.result_stdout_fnmatch_lin": {"doc_hash": "f3ee7b2ad8f36a519cd908674da431fd5492de2ad2ec66585a69aa545c031386"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_ignore_test_ignore.assert_WARNINGS_SUMMARY_H": {"doc_hash": "10a13a831f3488a25b9ec2b38cd24833e9651d48dbdcd2991f2dc0e85aec6604"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_unicode_test_unicode.result_stdout_fnmatch_lin": {"doc_hash": "9480ecf3349fd9afe70b7040640d7df6b238ade7f75d3b5642137a3eb7c415d0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_test_py2_unicode.result_stdout_fnmatch_lin": {"doc_hash": "ad608ac5ae69a95923ffe5d3312d966a87cecdc34bc51e1df6dd91cd32f0c9ec"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_ascii_test_py2_unicode_ascii.result_stdout_fnmatch_lin": {"doc_hash": "cf721daca38954d9c867d6f21b7a3eec4fdb13c953b78de7e06e054fb2f289a0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_works_with_filterwarnings_test_works_with_filterwarnings.result_stdout_fnmatch_lin": {"doc_hash": "a32a5ffcd09be45d3613aa0bb32c9ce5faf76e41238543803403d3920e87382a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_filterwarnings_mark_test_filterwarnings_mark.result_stdout_fnmatch_lin": {"doc_hash": "ed7fd3926b1c792a0bfdcb7fa2d15b0eba58dc1fd2d7c08e9997f1c4b7a630a1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_non_string_warning_argument_test_filterwarnings_mark_registration.assert_result_ret_0": {"doc_hash": "11e67af3317ca0dee8ae3f8e7eccca9b0af80d9a0d532ade031443e04653eed5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warning_captured_hook_test_warning_captured_hook.assert_collected_expec": {"doc_hash": "8547cf64e323f1a8a0bd605efc5e7ad885f6c39d0d8f439186e42dc3feb93223"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_collection_warnings_test_collection_warnings.result_stdout_fnmatch_lin": {"doc_hash": "cc93df3baef5219cf849028c6b661f27259d64576e6ccf932cadc43d04f369fb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_mark_regex_escape_test_mark_regex_escape.assert_WARNINGS_SUMMARY_H": {"doc_hash": "13f696c05559a24cbbf6eb52001bb701f8fa594498347b7f26df921dda867b2e"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_hide_pytest_internal_warnings_test_hide_pytest_internal_warnings.None_1.else_.result_stdout_fnmatch_lin": {"doc_hash": "40b7829e92feb95faada3c11b22e22f95f41256c6f961b369846834c17b30e96"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_cmdline_over_ini_test_option_precedence_cmdline_over_ini.if_ignore_on_cmdline_.else_.result_stdout_fnmatch_lin": {"doc_hash": "ac25357d93db24dad8ac5cb12b7c1b052d111cd43cd9b46ae9b0b5f52194aceb"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_mark_test_option_precedence_mark.result_stdout_fnmatch_lin": {"doc_hash": "a3ffd4c09ae58f55276c79671952724474d881da2edd914fec56ed8a7389d7dd"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault_TestDeprecationWarningsByDefault.create_file.testdir_makepyfile_": {"doc_hash": "499dbd1486b7cd81749e97ae4269d8477392c22f7b31591add1b8626e7d1f8e0"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_shown_by_default_TestDeprecationWarningsByDefault.test_shown_by_default.result_stdout_fnmatch_lin": {"doc_hash": "5541fe497f31e1c0a19c9524230a4064675298fe19e49cab95537c721b27577d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_ini_TestDeprecationWarningsByDefault.test_hidden_by_mark.result_stdout_fnmatch_lin": {"doc_hash": "d5e6e12795e27fc0a9ecb1b49c18b7e84ea4fdd70be9a859905e5f5591e36d04"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_cmdline_TestDeprecationWarningsByDefault.test_hidden_by_system.assert_WARNINGS_SUMMARY_H": {"doc_hash": "e86abc9ca21a83d8588715aecde1c1192004389a07432bd8bede6d28b9c1caca"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_infinite_loop_warning_against_unicode_usage_py2_test_infinite_loop_warning_against_unicode_usage_py2.result_stdout_fnmatch_lin": {"doc_hash": "f9987fb202be0fe6b15c096f11009733e11916277839708d11f24a2bd80463d4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_removed_in_pytest4_warning_as_error_test_removed_in_pytest4_warning_as_error.if_change_default_is_None.else_.result_stdout_fnmatch_lin": {"doc_hash": "c76585c5dd7083c360b1da4ff0389d7b5b31c7059de5b0cfa1f383d8c47091b5"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestAssertionWarnings_TestAssertionWarnings.test_false_function_no_warn.result_stdout_fnmatch_lin": {"doc_hash": "b4e90df0bab020569c5c7694d70fbddb714e4d116827641ede1d4e5b477c5dc3"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warnings_checker_twice_": {"doc_hash": "f74becac8e9f5cdb8d06da27ca21ac8678c72f3dc4dd5171504b25b0ca067648"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_six_TestFailing.test_not.assert_not_f_": {"doc_hash": "769e02c4f668c24bdca2e984fe3fa3af43a07a2b60518d3aa3c18597603b1c65"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_xml_attribute_record_xml_attribute.return.attr_func": {"doc_hash": "be731442ebd43b403b3144bc2d5c138f27a4c10ae0654a68b949d9eb0ee67d4c"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption_pytest_addoption.None_13": {"doc_hash": "ec064bf85d70ebd0c7d77e572e4624358875db938ff2c08d434ac3663ce5e4f8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption.None_14_pytest_addoption.None_17": {"doc_hash": "b52cc287ea6fcd66d5a65ab1b81de0b45cfc665ab5be58d065d9a7c2930ffffa"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py__support_for_skip_xfai_pytest_addoption.parser_addini_": {"doc_hash": "83b2bc9cd1f7fa2dcc6824cee229e9afd5efced0fef55cd2de506ae3bd15f0d8"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__terminal_reporting_of_REPORT_COLLECTING_RESOLUTION.0_5": {"doc_hash": "6be69a3bcf8b2e93f53180c70d7ab8916c8e6f6c26ff839402f157e39871e8ba"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary._get_pos_TerminalReporter.short_test_summary.if_lines_.for_line_in_lines_.self_write_line_line_": {"doc_hash": 
"9dfcd01ed19418519ccc79eb6be990d4f02f5f920a0d7177268fc17278010c1a"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_bad_markname_test_addmarker_order.assert_extracted_c_": {"doc_hash": "7ade4114d0207e14ebf290589731792886501fbcc1e631aa893609014eaeb171"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_trace_after_runpytest_test_trace_after_runpytest.assert_child_exitstatus_": {"doc_hash": "55b37eac76980d28409f695da6f5b5b4d0bf9f15610fc48258f22b4d9f49dff9"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdbcls_via_local_module_": {"doc_hash": "183ff5904058c8b9bac6c86a72f87ab8de0027a36cd1a9817ccd15a34c940cf4"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_from___future___import_ab_TestEvaluator.test_marked_one_arg.assert_expl_condition": {"doc_hash": "325965eda5755ba11f7f2d8c7bf73e85e42ec69f50cf3f85e7015d2e3fb7639d"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py___option.return.request_param": {"doc_hash": "6afe39bd6d69c2ee7e87a1bb4eb4229254b28df671567459047e86b071736db6"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fail_extra_reporting_test_pass_reporting_on_fail.assert_short_test_summar": {"doc_hash": "85fc7b82e14102892d95f9a0174350c11c99f83625eb18edd3dafea38dc91404"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_reasons_folding_": {"doc_hash": "7ec0c266a9f44a501f9e43510886a4ea2eb9f9031f44f94e359011cba3e61037"}}, "docstore/data": {"/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench.py__", "embedding": null, "metadata": {"file_path": "bench/bench.py", "file_name": "bench.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 14, "span_ids": ["imports", "imports:2", "impl:2", "impl"], "tokens": 101}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nif __name__ == \"__main__\":\n import cProfile\n import pytest # NOQA\n import pstats\n\n script = sys.argv[1:] if len(sys.argv) > 1 else [\"empty.py\"]\n stats = cProfile.run(\"pytest.cmdline.main(%r)\" % script, \"prof\")\n p = pstats.Stats(\"prof\")\n p.strip_dirs()\n p.sort_stats(\"cumulative\")\n print(p.print_stats(500))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench_argcomplete.py__10000_iterations_just__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/bench/bench_argcomplete.py__10000_iterations_just__", "embedding": null, "metadata": {"file_path": "bench/bench_argcomplete.py", "file_name": "bench_argcomplete.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["imports", "docstring", "impl"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "# 10000 iterations, just for relative comparison\n# 2.7.5 3.3.2\n# FilesCompleter 75.1109 69.2116\n# FastFilesCompleter 0.7383 1.0760\nimport timeit\n\nimports = [\n \"from argcomplete.completers import FilesCompleter as completer\",\n \"from _pytest._argcomplete import FastFilesCompleter as completer\",\n]\n\ncount = 1000 # only a few seconds\nsetup = \"%s\\nfc = completer()\"\nrun = 'fc(\"/d\")'\n\n\nif __name__ == \"__main__\":\n print(timeit.timeit(run, setup=setup % imports[0], number=count))\n print(timeit.timeit(run, setup=setup % imports[1], number=count))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/empty.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/bench/empty.py__", "embedding": null, "metadata": {"file_path": "bench/empty.py", "file_name": "empty.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 5, "span_ids": ["imports", "impl"], "tokens": 26}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import six\n\nfor i in range(1000):\n six.exec_(\"def test_func_%d(): pass\" % i)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/manyparam.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/bench/manyparam.py__", "embedding": null, "metadata": {"file_path": "bench/manyparam.py", "file_name": "manyparam.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["imports", "foo", "test_it", "test_it2"], "tokens": 40}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture(scope=\"module\", params=range(966))\ndef foo(request):\n return request.param\n\n\ndef test_it(foo):\n pass\n\n\ndef test_it2(foo):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/bench/skip.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/bench/skip.py__", "embedding": null, "metadata": {"file_path": "bench/skip.py", "file_name": "skip.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 12, "span_ids": ["test_foo", "imports", "impl"], "tokens": 41}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "from six.moves import range\n\nimport pytest\n\nSKIP = True\n\n\n@pytest.mark.parametrize(\"x\", range(5000))\ndef test_foo(x):\n if SKIP:\n pytest.skip(\"heh\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/_themes/flask_theme_support.py__flasky_extensions_fla_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/_themes/flask_theme_support.py__flasky_extensions_fla_", "embedding": null, "metadata": {"file_path": "doc/en/_themes/flask_theme_support.py", "file_name": "flask_theme_support.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 88, "span_ids": ["imports", "FlaskyStyle", "docstring"], "tokens": 1273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# flasky extensions. flasky pygments style based on tango style\nfrom pygments.style import Style\nfrom pygments.token import Comment\nfrom pygments.token import Error\nfrom pygments.token import Generic\nfrom pygments.token import Keyword\nfrom pygments.token import Literal\nfrom pygments.token import Name\nfrom pygments.token import Number\nfrom pygments.token import Operator\nfrom pygments.token import Other\nfrom pygments.token import Punctuation\nfrom pygments.token import String\nfrom pygments.token import Whitespace\n\n\nclass FlaskyStyle(Style):\n background_color = \"#f8f8f8\"\n default_style = \"\"\n\n styles = {\n # No corresponding class for the following:\n # Text: \"\", # class: ''\n Whitespace: \"underline #f8f8f8\", # class: 'w'\n Error: \"#a40000 border:#ef2929\", # class: 'err'\n Other: \"#000000\", # class 'x'\n Comment: \"italic #8f5902\", # class: 'c'\n Comment.Preproc: \"noitalic\", # class: 'cp'\n Keyword: \"bold #004461\", # class: 'k'\n Keyword.Constant: \"bold #004461\", # class: 'kc'\n Keyword.Declaration: \"bold #004461\", # class: 'kd'\n Keyword.Namespace: \"bold #004461\", # class: 'kn'\n Keyword.Pseudo: \"bold #004461\", # class: 'kp'\n Keyword.Reserved: \"bold #004461\", # class: 'kr'\n Keyword.Type: \"bold #004461\", # class: 'kt'\n Operator: \"#582800\", # class: 'o'\n Operator.Word: \"bold #004461\", # class: 'ow' - like keywords\n Punctuation: \"bold #000000\", # class: 'p'\n # because special names such as Name.Class, Name.Function, etc.\n # are not recognized as such later in the parsing, we choose them\n # to look the same as ordinary variables.\n Name: \"#000000\", # class: 'n'\n Name.Attribute: \"#c4a000\", # class: 'na' - to be revised\n Name.Builtin: \"#004461\", # class: 'nb'\n Name.Builtin.Pseudo: \"#3465a4\", # class: 'bp'\n Name.Class: \"#000000\", # class: 'nc' - to be revised\n Name.Constant: \"#000000\", # class: 'no' - to be revised\n Name.Decorator: \"#888\", # class: 'nd' - to be revised\n Name.Entity: \"#ce5c00\", # class: 'ni'\n Name.Exception: \"bold #cc0000\", # class: 'ne'\n Name.Function: \"#000000\", # class: 'nf'\n Name.Property: \"#000000\", # class: 'py'\n Name.Label: \"#f57900\", # class: 'nl'\n Name.Namespace: \"#000000\", # class: 'nn' - to be revised\n Name.Other: \"#000000\", # class: 'nx'\n 
Name.Tag: \"bold #004461\", # class: 'nt' - like a keyword\n Name.Variable: \"#000000\", # class: 'nv' - to be revised\n Name.Variable.Class: \"#000000\", # class: 'vc' - to be revised\n Name.Variable.Global: \"#000000\", # class: 'vg' - to be revised\n Name.Variable.Instance: \"#000000\", # class: 'vi' - to be revised\n Number: \"#990000\", # class: 'm'\n Literal: \"#000000\", # class: 'l'\n Literal.Date: \"#000000\", # class: 'ld'\n String: \"#4e9a06\", # class: 's'\n String.Backtick: \"#4e9a06\", # class: 'sb'\n String.Char: \"#4e9a06\", # class: 'sc'\n String.Doc: \"italic #8f5902\", # class: 'sd' - like a comment\n String.Double: \"#4e9a06\", # class: 's2'\n String.Escape: \"#4e9a06\", # class: 'se'\n String.Heredoc: \"#4e9a06\", # class: 'sh'\n String.Interpol: \"#4e9a06\", # class: 'si'\n String.Other: \"#4e9a06\", # class: 'sx'\n String.Regex: \"#4e9a06\", # class: 'sr'\n String.Single: \"#4e9a06\", # class: 's1'\n String.Symbol: \"#4e9a06\", # class: 'ss'\n Generic: \"#000000\", # class: 'g'\n Generic.Deleted: \"#a40000\", # class: 'gd'\n Generic.Emph: \"italic #000000\", # class: 'ge'\n Generic.Error: \"#ef2929\", # class: 'gr'\n Generic.Heading: \"bold #000080\", # class: 'gh'\n Generic.Inserted: \"#00A000\", # class: 'gi'\n Generic.Output: \"#888\", # class: 'go'\n Generic.Prompt: \"#745334\", # class: 'gp'\n Generic.Strong: \"bold #000000\", # class: 'gs'\n Generic.Subheading: \"bold #800080\", # class: 'gu'\n Generic.Traceback: \"bold #a40000\", # class: 'gt'\n }", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py__coding_utf_8__add_module_names.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py__coding_utf_8__add_module_names.False", "embedding": null, "metadata": {"file_path": "doc/en/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 105, "span_ids": ["impl:23", "imports", "docstring", "impl"], "tokens": 775}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\n#\n# pytest documentation build configuration file, created by\n# sphinx-quickstart on Fri Oct 8 17:54:28 2010.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\n# The short X.Y version.\nimport datetime\nimport os\nimport sys\n\nfrom _pytest import __version__ as version\n\nrelease = \".\".join(version.split(\".\")[:2])\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\nautodoc_member_order = \"bysource\"\ntodo_include_todos = 1\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"pygments_pytest\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"sphinx_removed_in\",\n \"sphinxcontrib_trio\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = \"contents\"\n\n# General information about the project.\nproject = u\"pytest\"\nyear = datetime.datetime.utcnow().year\ncopyright = u\"2015\u20132019 , holger krekel and pytest-dev team\"\n\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\n \"links.inc\",\n \"_build\",\n \"naming20.rst\",\n \"test/*\",\n \"old_*\",\n \"*attic*\",\n \"*/attic*\",\n \"funcargs.rst\",\n \"setup.rst\",\n \"example/remoteinterp.rst\",\n]\n\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\nadd_module_names = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_pygments_style__the_title_page_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_pygments_style__the_title_page_", "embedding": null, "metadata": {"file_path": "doc/en/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 112, "end_line": 243, "span_ids": ["impl:42", "impl:23"], "tokens": 843}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "pygments_style = \"sphinx\"\n\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\nsys.path.append(os.path.abspath(\"_themes\"))\nhtml_theme_path = [\"_themes\"]\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"flask\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"index_logo\": None}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \" v documentation\".\nhtml_title = \"pytest documentation\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\nhtml_short_title = \"pytest-%s\" % release\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"img/pytest1.png\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = \"img/pytest1favi.ico\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n# html_sidebars = {'index': 'indexsidebar.html'}\n\nhtml_sidebars = {\n \"index\": [\n \"sidebarintro.html\",\n \"globaltoc.html\",\n \"links.html\",\n \"sourcelink.html\",\n \"searchbox.html\",\n ],\n \"**\": [\n \"globaltoc.html\",\n \"relations.html\",\n \"links.html\",\n \"sourcelink.html\",\n \"searchbox.html\",\n ],\n}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n# html_additional_pages = {'index': 'index.html'}\n\n\n# If false, no module index is generated.\nhtml_domain_indices = True\n\n# If false, no index is generated.\nhtml_use_index = False\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\nhtmlhelp_basename = \"pytestdoc\"\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\nlatex_documents = [\n (\n \"contents\",\n \"pytest.tex\",\n u\"pytest Documentation\",\n u\"holger krekel, trainer and consultant, http://merlinux.eu\",\n \"manual\",\n )\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_latex_logo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conf.py_latex_logo_", "embedding": null, "metadata": {"file_path": "doc/en/conf.py", "file_name": "conf.py", "file_type": "text/x-python", "category": "implementation", "start_line": 244, "end_line": 344, "span_ids": ["impl:54", "impl:70", "setup"], "tokens": 663}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "latex_logo = \"img/pytest1.png\"\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Additional stuff for the LaTeX preamble.\n# latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\nlatex_domain_indices = False\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\nman_pages = [(\"usage\", \"pytest\", u\"pytest usage\", [u\"holger krekel at merlinux eu\"], 1)]\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u\"pytest\"\nepub_author = u\"holger krekel at merlinux eu\"\nepub_publisher = u\"holger krekel at merlinux eu\"\nepub_copyright = u\"2013, holger krekel et alii\"\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n# epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n# epub_scheme = ''\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n# epub_identifier = ''\n\n# A unique identification for the text.\n# epub_uid = ''\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n# epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n# epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n# epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n# epub_tocdup = True\n\n\n# -- Options for texinfo output ------------------------------------------------\n\ntexinfo_documents = [\n (\n master_doc,\n \"pytest\",\n \"pytest Documentation\",\n (\n \"Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*\"\n \"Floris Bruynooghe@*others\"\n ),\n \"pytest\",\n \"simple powerful testing with Python\",\n \"Programming\",\n 1,\n )\n]\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n\ndef setup(app):\n # from sphinx.ext.autodoc import cut_lines\n # app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))\n app.add_object_type(\n \"confval\",\n \"confval\",\n objname=\"configuration value\",\n indextemplate=\"pair: %s; configuration value\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/conftest.py__", "embedding": null, "metadata": {"file_path": "doc/en/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["impl"], "tokens": 7}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "collect_ignore = [\"conf.py\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestSpecialisedExplanations_TestSpecialisedExplanations.test_eq_attrs.assert_left_right": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestSpecialisedExplanations_TestSpecialisedExplanations.test_eq_attrs.assert_left_right", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 121, "span_ids": ["TestSpecialisedExplanations.test_eq_dataclass.Foo:2", "TestSpecialisedExplanations.test_eq_long_text_multiline", "TestSpecialisedExplanations.test_eq_dataclass", "TestSpecialisedExplanations.test_in_list", "TestSpecialisedExplanations.test_eq_attrs.Foo", "TestSpecialisedExplanations.test_eq_set", "TestSpecialisedExplanations.test_not_in_text_single_long_term", 
"TestSpecialisedExplanations.test_not_in_text_single_long", "TestSpecialisedExplanations.test_eq_longer_list", "TestSpecialisedExplanations.test_eq_dict", "TestSpecialisedExplanations.test_eq_list_long", "TestSpecialisedExplanations.test_not_in_text_single", "TestSpecialisedExplanations.test_eq_multiline_text", "TestSpecialisedExplanations.test_eq_similar_text", "TestSpecialisedExplanations.test_eq_list", "TestSpecialisedExplanations.test_eq_text", "TestSpecialisedExplanations.test_not_in_text_multiline", "TestSpecialisedExplanations.test_eq_dataclass.Foo", "TestSpecialisedExplanations.test_eq_attrs", "TestSpecialisedExplanations.test_eq_long_text", "TestSpecialisedExplanations", "TestSpecialisedExplanations.test_eq_attrs.Foo:2"], "tokens": 683}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSpecialisedExplanations(object):\n def test_eq_text(self):\n assert \"spam\" == \"eggs\"\n\n def test_eq_similar_text(self):\n assert \"foo 1 bar\" == \"foo 2 bar\"\n\n def test_eq_multiline_text(self):\n assert \"foo\\nspam\\nbar\" == \"foo\\neggs\\nbar\"\n\n def test_eq_long_text(self):\n a = \"1\" * 100 + \"a\" + \"2\" * 100\n b = \"1\" * 100 + \"b\" + \"2\" * 100\n assert a == b\n\n def test_eq_long_text_multiline(self):\n a = \"1\\n\" * 100 + \"a\" + \"2\\n\" * 100\n b = \"1\\n\" * 100 + \"b\" + \"2\\n\" * 100\n assert a == b\n\n def test_eq_list(self):\n assert [0, 1, 2] == [0, 1, 3]\n\n def test_eq_list_long(self):\n a = [0] * 100 + [1] + [3] * 100\n b = [0] * 100 + [2] + [3] * 100\n assert a == b\n\n def test_eq_dict(self):\n assert {\"a\": 0, \"b\": 1, \"c\": 0} == {\"a\": 0, \"b\": 2, \"d\": 0}\n\n def test_eq_set(self):\n assert {0, 10, 11, 12} == {0, 20, 21}\n\n def test_eq_longer_list(self):\n assert [1, 2] == [1, 2, 3]\n\n def test_in_list(self):\n assert 1 in [0, 2, 3, 4, 5]\n\n def test_not_in_text_multiline(self):\n text = \"some multiline\\ntext\\nwhich\\nincludes foo\\nand a\\ntail\"\n assert \"foo\" not in text\n\n def test_not_in_text_single(self):\n text = \"single foo line\"\n assert \"foo\" not in text\n\n def test_not_in_text_single_long(self):\n text = \"head \" * 50 + \"foo \" + \"tail \" * 20\n assert \"foo\" not in text\n\n def test_not_in_text_single_long_term(self):\n text = \"head \" * 50 + \"f\" * 70 + \"tail \" * 20\n assert \"f\" * 70 not in text\n\n def test_eq_dataclass(self):\n from dataclasses import dataclass\n\n @dataclass\n class Foo(object):\n a: int\n b: str\n\n left = Foo(1, \"b\")\n right = Foo(1, \"c\")\n assert left == right\n\n def test_eq_attrs(self):\n import attr\n\n @attr.s\n class Foo(object):\n a = attr.ib()\n b = attr.ib()\n\n left = Foo(1, \"b\")\n right = Foo(1, \"c\")\n assert left == right", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_test_attribute_globf.return.x_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_test_attribute_globf.return.x_1", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": 
"text/x-python", "category": "implementation", "start_line": 124, "end_line": 161, "span_ids": ["test_attribute_instance.Foo:2", "test_attribute.Foo:2", "test_attribute_multiple", "test_attribute_instance.Foo", "test_attribute_multiple.Bar", "test_attribute", "globf", "test_attribute.Foo", "test_attribute_instance", "test_attribute_multiple.Foo", "test_attribute_failure", "test_attribute_multiple.Foo:2", "test_attribute_failure.Foo._get_b", "test_attribute_multiple.Bar:2", "test_attribute_failure.Foo"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_attribute():\n class Foo(object):\n b = 1\n\n i = Foo()\n assert i.b == 2\n\n\ndef test_attribute_instance():\n class Foo(object):\n b = 1\n\n assert Foo().b == 2\n\n\ndef test_attribute_failure():\n class Foo(object):\n def _get_b(self):\n raise Exception(\"Failed to get attrib\")\n\n b = property(_get_b)\n\n i = Foo()\n assert i.b == 2\n\n\ndef test_attribute_multiple():\n class Foo(object):\n b = 1\n\n class Bar(object):\n b = 2\n\n assert Foo().b == Bar().b\n\n\ndef globf(x):\n return x + 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestRaises_test_dynamic_compile_shows_nicely.module_foo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestRaises_test_dynamic_compile_shows_nicely.module_foo_", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 166, "end_line": 204, "span_ids": ["TestRaises.test_raise", "TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it", "TestRaises", "TestRaises.func1", "test_dynamic_compile_shows_nicely", "TestRaises.test_tupleerror", "TestRaises.test_raises", "TestRaises.test_raises_doesnt", "TestRaises.test_some_error"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n def test_raises(self):\n s = \"qwe\"\n raises(TypeError, int, s)\n\n def test_raises_doesnt(self):\n raises(IOError, int, \"3\")\n\n def test_raise(self):\n raise ValueError(\"demo error\")\n\n def test_tupleerror(self):\n a, b = [1] # NOQA\n\n def test_reinterpret_fails_with_print_for_the_fun_of_it(self):\n items = [1, 2, 3]\n print(\"items is %r\" % items)\n a, b = items.pop()\n\n def test_some_error(self):\n if namenotexi: # NOQA\n pass\n\n def func1(self):\n assert 41 == 42\n\n\n# thanks to Matthew Scott for this test\ndef test_dynamic_compile_shows_nicely():\n import imp\n import sys\n\n src = \"def foo():\\n assert 1 == 0\\n\"\n name = \"abc-123\"\n module = imp.new_module(name)\n code = _pytest._code.compile(src, name, \"exec\")\n six.exec_(code, module.__dict__)\n sys.modules[name] = module\n 
module.foo()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestMoreErrors_TestMoreErrors.test_try_finally.try_.finally_.x.0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestMoreErrors_TestMoreErrors.test_try_finally.try_.finally_.x.0", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 205, "end_line": 252, "span_ids": ["TestMoreErrors.test_z1_unpack_error", "TestMoreErrors", "TestMoreErrors.test_instance", "TestMoreErrors.test_try_finally", "TestMoreErrors.test_complex_error", "TestMoreErrors.test_compare", "TestMoreErrors.test_z2_type_error", "TestMoreErrors.test_global_func", "TestMoreErrors.test_startswith", "TestMoreErrors.test_startswith_nested"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMoreErrors(object):\n def test_complex_error(self):\n def f():\n return 44\n\n def g():\n return 43\n\n somefunc(f(), g())\n\n def test_z1_unpack_error(self):\n items = []\n a, b = items\n\n def test_z2_type_error(self):\n items = 3\n a, b = items\n\n def test_startswith(self):\n s = \"123\"\n g = \"456\"\n assert s.startswith(g)\n\n def test_startswith_nested(self):\n def f():\n return \"123\"\n\n def g():\n return \"456\"\n\n assert f().startswith(g())\n\n def test_global_func(self):\n assert isinstance(globf(42), float)\n\n def test_instance(self):\n self.x = 6 * 7\n assert self.x != 42\n\n def test_compare(self):\n assert globf(10) < 5\n\n def test_try_finally(self):\n x = 1\n try:\n assert x == 0\n finally:\n x = 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestCustomAssertMsg_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_TestCustomAssertMsg_", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 255, "end_line": 282, "span_ids": ["TestCustomAssertMsg.test_multiline.A", "TestCustomAssertMsg.test_single_line.A", "TestCustomAssertMsg.test_single_line.A:2", "TestCustomAssertMsg.test_custom_repr.JSON", "TestCustomAssertMsg.test_multiline.A:2", "TestCustomAssertMsg", "TestCustomAssertMsg.test_multiline", "TestCustomAssertMsg.test_custom_repr.JSON:2", "TestCustomAssertMsg.test_custom_repr", "TestCustomAssertMsg.test_single_line"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestCustomAssertMsg(object):\n def test_single_line(self):\n class A(object):\n a = 1\n\n b = 2\n assert A.a == b, \"A.a appears not to be b\"\n\n def test_multiline(self):\n class A(object):\n a = 1\n\n b = 2\n assert (\n A.a == b\n ), \"A.a appears not to be b\\nor does not appear to be b\\none of those\"\n\n def test_custom_repr(self):\n class JSON(object):\n a = 1\n\n def __repr__(self):\n return \"This is JSON\\n{\\n 'foo': 'bar'\\n}\"\n\n a = JSON()\n b = 2\n assert a.a == b, a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/conftest.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/global_testmodule_config/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["imports", "pytest_runtest_setup", "impl"], "tokens": 81}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import py\n\nimport pytest\n\nmydir = py.path.local(__file__).dirpath()\n\n\ndef pytest_runtest_setup(item):\n if isinstance(item, pytest.Function):\n if not item.fspath.relto(mydir):\n return\n mod = item.getparent(pytest.Module).obj\n if hasattr(mod, \"hello\"):\n print(\"mod.hello {!r}\".format(mod.hello))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/test_hello_world.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/global_testmodule_config/test_hello_world.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/global_testmodule_config/test_hello_world.py", "file_name": "test_hello_world.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 6, "span_ids": ["impl", "test_func"], "tokens": 11}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "hello = \"world\"\n\n\ndef test_func():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_failures.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_failures.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/test_failures.py", "file_name": "test_failures.py", "file_type": "text/x-python", "category": 
"test", "start_line": 1, "end_line": 14, "span_ids": ["imports", "test_failure_demo_fails_properly", "impl"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import py\n\nfailure_demo = py.path.local(__file__).dirpath(\"failure_demo.py\")\npytest_plugins = (\"pytester\",)\n\n\ndef test_failure_demo_fails_properly(testdir):\n target = testdir.tmpdir.join(failure_demo.basename)\n failure_demo.copy(target)\n failure_demo.copy(testdir.tmpdir.join(failure_demo.basename))\n result = testdir.runpytest(target, syspathinsert=True)\n result.stdout.fnmatch_lines([\"*44 failed*\"])\n assert result.ret != 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_setup_flow_example.py_setup_module_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/test_setup_flow_example.py_setup_module_", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/test_setup_flow_example.py", "file_name": "test_setup_flow_example.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 45, "span_ids": ["TestStateFullThing", "TestStateFullThing.test_23", "TestStateFullThing.setup_method", "teardown_module", "TestStateFullThing.teardown_class", "setup_module", "TestStateFullThing.test_42", "TestStateFullThing.setup_class"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setup_module(module):\n module.TestStateFullThing.classcount = 0\n\n\nclass TestStateFullThing(object):\n def setup_class(cls):\n cls.classcount += 1\n\n def teardown_class(cls):\n cls.classcount -= 1\n\n def setup_method(self, method):\n self.id = eval(method.__name__[5:])\n\n def test_42(self):\n assert self.classcount == 1\n assert self.id == 42\n\n def test_23(self):\n assert self.classcount == 1\n assert self.id == 23\n\n\ndef teardown_module(module):\n assert module.TestStateFullThing.classcount == 0\n\n\n\"\"\" For this example the control flow happens as follows::\n import test_setup_flow_example\n setup_module(test_setup_flow_example)\n setup_class(TestStateFullThing)\n instance = TestStateFullThing()\n setup_method(instance, instance.test_42)\n instance.test_42()\n setup_method(instance, instance.test_23)\n instance.test_23()\n teardown_class(TestStateFullThing)\n teardown_module(test_setup_flow_example)\n\nNote that ``setup_class(TestStateFullThing)`` is called and not\n``TestStateFullThing.setup_class()`` which would require you\nto insert ``setup_class = classmethod(setup_class)`` to make\nyour setup function callable.\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/conftest.py__": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/conftest.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["impl"], "tokens": 7}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "collect_ignore = [\"nonpython\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/conftest.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/costlysetup/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports", "CostlySetup", "CostlySetup.finalize", "setup"], "tokens": 79}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture(\"session\")\ndef setup(request):\n setup = CostlySetup()\n yield setup\n setup.finalize()\n\n\nclass CostlySetup(object):\n def __init__(self):\n import time\n\n print(\"performing costly setup\")\n time.sleep(5)\n self.timecostly = 1\n\n def finalize(self):\n del self.timecostly", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/__init__.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/__init__.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/costlysetup/sub_a/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["docstring"], "tokens": 1}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/test_quick.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_a/test_quick.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/costlysetup/sub_a/test_quick.py", "file_name": "test_quick.py", "file_type": "text/x-python", "category": "test", "start_line": 1, 
"end_line": 3, "span_ids": ["test_quick"], "tokens": 8}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_quick(setup):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/__init__.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/__init__.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/costlysetup/sub_b/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 2, "span_ids": ["docstring"], "tokens": 1}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/test_two.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/costlysetup/sub_b/test_two.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/costlysetup/sub_b/test_two.py", "file_name": "test_two.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["test_something", "test_something_more"], "tokens": 34}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_something(setup):\n assert setup.timecostly == 1\n\n\ndef test_something_more(setup):\n assert setup.timecostly == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py___python2.return.Python_request_param_pyt": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py___python2.return.Python_request_param_pyt", "embedding": null, "metadata": {"file_path": "doc/en/example/multipython.py", "file_name": "multipython.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["impl", "docstring", "imports", "python1", "python2"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"\"\"\"\nmodule containing a parametrized tests testing cross-python\nserialization via the pickle module.\n\"\"\"\nimport distutils.spawn\nimport subprocess\nimport textwrap\n\nimport pytest\n\npythonlist = [\"python2.7\", \"python3.4\", \"python3.5\"]\n\n\n@pytest.fixture(params=pythonlist)\ndef python1(request, tmpdir):\n picklefile = tmpdir.join(\"data.pickle\")\n return Python(request.param, picklefile)\n\n\n@pytest.fixture(params=pythonlist)\ndef python2(request, python1):\n return Python(request.param, python1.picklefile)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python_Python.dumps.subprocess_check_call_se": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python_Python.dumps.subprocess_check_call_se", "embedding": null, "metadata": {"file_path": "doc/en/example/multipython.py", "file_name": "multipython.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 46, "span_ids": ["Python.dumps", "Python"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Python(object):\n def __init__(self, version, picklefile):\n self.pythonpath = distutils.spawn.find_executable(version)\n if not self.pythonpath:\n pytest.skip(\"{!r} not found\".format(version))\n self.picklefile = picklefile\n\n def dumps(self, obj):\n dumpfile = self.picklefile.dirpath(\"dump.py\")\n dumpfile.write(\n textwrap.dedent(\n r\"\"\"\n import pickle\n f = open({!r}, 'wb')\n s = pickle.dump({!r}, f, protocol=2)\n f.close()\n \"\"\".format(\n str(self.picklefile), obj\n )\n )\n )\n subprocess.check_call((self.pythonpath, str(dumpfile)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python.load_and_is_true_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/multipython.py_Python.load_and_is_true_", "embedding": null, "metadata": {"file_path": "doc/en/example/multipython.py", "file_name": "multipython.py", "file_type": "text/x-python", "category": "implementation", "start_line": 48, "end_line": 73, "span_ids": ["test_basic_objects", "Python.load_and_is_true"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Python(object):\n\n def load_and_is_true(self, expression):\n loadfile = self.picklefile.dirpath(\"load.py\")\n loadfile.write(\n textwrap.dedent(\n r\"\"\"\n import pickle\n f = open({!r}, 'rb')\n obj = pickle.load(f)\n f.close()\n res = eval({!r})\n if not res:\n raise SystemExit(1)\n \"\"\".format(\n str(self.picklefile), expression\n )\n )\n )\n print(loadfile)\n subprocess.check_call((self.pythonpath, 
str(loadfile)))\n\n\n@pytest.mark.parametrize(\"obj\", [42, {}, {1: 3}])\ndef test_basic_objects(python1, python2, obj):\n python1.dumps(obj)\n python2.load_and_is_true(\"obj == %s\" % obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/nonpython/conftest.py__content_of_conftest_py_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/nonpython/conftest.py__content_of_conftest_py_", "embedding": null, "metadata": {"file_path": "doc/en/example/nonpython/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 47, "span_ids": ["YamlFile.collect", "docstring", "YamlFile", "YamlItem", "YamlItem.runtest", "YamlItem.repr_failure", "YamlException", "pytest_collect_file", "imports", "YamlItem.reportinfo"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# content of conftest.py\nimport pytest\n\n\ndef pytest_collect_file(parent, path):\n if path.ext == \".yml\" and path.basename.startswith(\"test\"):\n return YamlFile(path, parent)\n\n\nclass YamlFile(pytest.File):\n def collect(self):\n import yaml # we need a yaml parser, e.g. PyYAML\n\n raw = yaml.safe_load(self.fspath.open())\n for name, spec in sorted(raw.items()):\n yield YamlItem(name, self, spec)\n\n\nclass YamlItem(pytest.Item):\n def __init__(self, name, parent, spec):\n super(YamlItem, self).__init__(name, parent)\n self.spec = spec\n\n def runtest(self):\n for name, value in sorted(self.spec.items()):\n # some custom test execution (dumb example follows)\n if name != value:\n raise YamlException(self, name, value)\n\n def repr_failure(self, excinfo):\n \"\"\" called when self.runtest() raises an exception. \"\"\"\n if isinstance(excinfo.value, YamlException):\n return \"\\n\".join(\n [\n \"usecase execution failed\",\n \" spec failed: %r: %r\" % excinfo.value.args[1:3],\n \" no further details known at this point.\",\n ]\n )\n\n def reportinfo(self):\n return self.fspath, 0, \"usecase: %s\" % self.name\n\n\nclass YamlException(Exception):\n \"\"\" custom exception for error reporting. 
\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/conftest.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/py2py3/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 17, "span_ids": ["DummyCollector.collect", "impl", "pytest_pycollect_makemodule", "DummyCollector", "imports"], "tokens": 86}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport pytest\n\npy3 = sys.version_info[0] >= 3\n\n\nclass DummyCollector(pytest.collect.File):\n def collect(self):\n return []\n\n\ndef pytest_pycollect_makemodule(path, parent):\n bn = path.basename\n if \"py3\" in bn and not py3 or (\"py2\" in bn and py3):\n return DummyCollector(path, parent=parent)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py2.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py2.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/py2py3/test_py2.py", "file_name": "test_py2.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 6, "span_ids": ["test_exception_syntax"], "tokens": 26}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exception_syntax():\n try:\n 0 / 0\n except ZeroDivisionError, e:\n assert e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py3.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/py2py3/test_py3.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/py2py3/test_py3.py", "file_name": "test_py3.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 6, "span_ids": ["test_exception_syntax"], "tokens": 26}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exception_syntax():\n try:\n 0 / 0\n except ZeroDivisionError as e:\n assert e", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/pythoncollection.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/pythoncollection.py__", "embedding": null, "metadata": {"file_path": "doc/en/example/pythoncollection.py", "file_name": "pythoncollection.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["test_function", "TestClass", "TestClass.test_anothermethod", "TestClass.test_method", "docstring"], "tokens": 47}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# run this with $ pytest --collect-only test_collectonly.py\n#\n\n\ndef test_function():\n pass\n\n\nclass TestClass(object):\n def test_method(self):\n pass\n\n def test_anothermethod(self):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/xfail_demo.py_pytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/xfail_demo.py_pytest_", "embedding": null, "metadata": {"file_path": "doc/en/example/xfail_demo.py", "file_name": "xfail_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 39, "span_ids": ["impl", "test_hello6", "test_hello", "test_hello2", "test_hello7", "test_hello5", "test_hello3", "imports", "test_hello4"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\nxfail = pytest.mark.xfail\n\n\n@xfail\ndef test_hello():\n assert 0\n\n\n@xfail(run=False)\ndef test_hello2():\n assert 0\n\n\n@xfail(\"hasattr(os, 'sep')\")\ndef test_hello3():\n assert 0\n\n\n@xfail(reason=\"bug 110\")\ndef test_hello4():\n assert 0\n\n\n@xfail('pytest.__version__[0] != \"17\"')\ndef test_hello5():\n assert 0\n\n\ndef test_hello6():\n pytest.xfail(\"reason\")\n\n\n@xfail(raises=IndexError)\ndef test_hello7():\n x = []\n x[1] = 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_json_get_issues.while_1_.if_not_another_page_.return.issues": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_json_get_issues.while_1_.if_not_another_page_.return.issues", "embedding": null, "metadata": {"file_path": "extra/get_issues.py", "file_name": "get_issues.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["imports", "get_issues", "impl"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import json\n\nimport py\nimport requests\n\nissues_url = \"https://api.github.com/repos/pytest-dev/pytest/issues\"\n\n\ndef get_issues():\n issues = []\n url = issues_url\n while 1:\n get_data = {\"state\": \"all\"}\n r = requests.get(url, params=get_data)\n data = r.json()\n if r.status_code == 403:\n # API request limit exceeded\n print(data[\"message\"])\n exit(1)\n issues.extend(data)\n\n # Look for next page\n links = requests.utils.parse_header_links(r.headers[\"Link\"])\n another_page = False\n for link in links:\n if link[\"rel\"] == \"next\":\n url = link[\"url\"]\n another_page = True\n if not another_page:\n return issues", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_main__get_kind.return._issue_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_main__get_kind.return._issue_", "embedding": null, "metadata": {"file_path": "extra/get_issues.py", "file_name": "get_issues.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 52, "span_ids": ["_get_kind", "main"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def main(args):\n cachefile = py.path.local(args.cache)\n if not cachefile.exists() or args.refresh:\n issues = get_issues()\n cachefile.write(json.dumps(issues))\n else:\n issues = json.loads(cachefile.read())\n\n open_issues = [x for x in issues if x[\"state\"] == \"open\"]\n\n open_issues.sort(key=lambda x: x[\"number\"])\n report(open_issues)\n\n\ndef _get_kind(issue):\n labels = [l[\"name\"] for l in issue[\"labels\"]]\n for key in (\"bug\", \"enhancement\", \"proposal\"):\n if key in labels:\n return key\n return \"issue\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_report_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/extra/get_issues.py_report_", "embedding": null, "metadata": {"file_path": "extra/get_issues.py", "file_name": "get_issues.py", "file_type": "text/x-python", "category": "implementation", "start_line": 55, "end_line": 86, "span_ids": ["impl:3", "report"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def report(issues):\n for issue in issues:\n title = issue[\"title\"]\n # body = issue[\"body\"]\n kind = _get_kind(issue)\n status = issue[\"state\"]\n number = issue[\"number\"]\n link = \"https://github.com/pytest-dev/pytest/issues/%s/\" % number\n print(\"----\")\n print(status, kind, link)\n print(title)\n # print()\n # lines = body.split(\"\\n\")\n # print(\"\\n\".join(lines[:3]))\n # if len(lines) > 3 or 
len(body) > 240:\n # print(\"...\")\n print(\"\\n\\nFound %s open issues\" % len(issues))\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\"process bitbucket issues\")\n parser.add_argument(\n \"--refresh\", action=\"store_true\", help=\"invalidate cache, refresh issues\"\n )\n parser.add_argument(\n \"--cache\", action=\"store\", default=\"issues.json\", help=\"cache file\"\n )\n args = parser.parse_args()\n main(args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/extra/setup-py.test/setup.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/extra/setup-py.test/setup.py__", "embedding": null, "metadata": {"file_path": "extra/setup-py.test/setup.py", "file_name": "setup.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 12, "span_ids": ["imports", "impl"], "tokens": 78}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\nfrom distutils.core import setup\n\nif __name__ == \"__main__\":\n if \"sdist\" not in sys.argv[1:]:\n raise ValueError(\"please use 'pytest' pypi package instead of 'py.test'\")\n setup(\n name=\"py.test\",\n version=\"0.0\",\n description=\"please use 'pytest' for installation\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py___announce.check_call_git_add_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py___announce.check_call_git_add_", "embedding": null, "metadata": {"file_path": "scripts/release.py", "file_name": "release.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 65, "span_ids": ["imports", "announce", "docstring"], "tokens": 495}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nInvoke development tasks.\n\"\"\"\nimport argparse\nfrom pathlib import Path\nfrom subprocess import call\nfrom subprocess import check_call\nfrom subprocess import check_output\n\nfrom colorama import Fore\nfrom colorama import init\n\n\ndef announce(version):\n \"\"\"Generates a new release announcement entry in the docs.\"\"\"\n stdout = check_output([\"git\", \"describe\", \"--abbrev=0\", \"--tags\"])\n stdout = stdout.decode(\"utf-8\")\n last_version = stdout.strip()\n\n stdout = check_output(\n [\"git\", \"log\", \"{}..HEAD\".format(last_version), \"--format=%aN\"]\n )\n stdout = stdout.decode(\"utf-8\")\n\n contributors = set(stdout.splitlines())\n\n template_name = (\n \"release.minor.rst\" if version.endswith(\".0\") else \"release.patch.rst\"\n )\n template_text = (\n Path(__file__).parent.joinpath(template_name).read_text(encoding=\"UTF-8\")\n )\n\n 
contributors_text = (\n \"\\n\".join(\"* {}\".format(name) for name in sorted(contributors)) + \"\\n\"\n )\n text = template_text.format(version=version, contributors=contributors_text)\n\n target = Path(__file__).parent.joinpath(\n \"../doc/en/announce/release-{}.rst\".format(version)\n )\n target.write_text(text, encoding=\"UTF-8\")\n print(f\"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}\")\n\n # Update index with the new release entry\n index_path = Path(__file__).parent.joinpath(\"../doc/en/announce/index.rst\")\n lines = index_path.read_text(encoding=\"UTF-8\").splitlines()\n indent = \" \"\n for index, line in enumerate(lines):\n if line.startswith(\"{}release-\".format(indent)):\n new_line = indent + target.stem\n if line != new_line:\n lines.insert(index, new_line)\n index_path.write_text(\"\\n\".join(lines) + \"\\n\", encoding=\"UTF-8\")\n print(\n f\"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}\"\n )\n else:\n print(\n f\"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)\"\n )\n break\n\n check_call([\"git\", \"add\", str(target)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py_regen_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/scripts/release.py_regen_", "embedding": null, "metadata": {"file_path": "scripts/release.py", "file_name": "release.py", "file_type": "text/x-python", "category": "implementation", "start_line": 68, "end_line": 116, "span_ids": ["fix_formatting", "pre_release", "main", "impl", "changelog", "regen"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def regen():\n \"\"\"Call regendoc tool to update examples and pytest output in the docs.\"\"\"\n print(f\"{Fore.CYAN}[generate.regen] {Fore.RESET}Updating docs\")\n check_call([\"tox\", \"-e\", \"regen\"])\n\n\ndef fix_formatting():\n \"\"\"Runs pre-commit in all files to ensure they are formatted correctly\"\"\"\n print(\n f\"{Fore.CYAN}[generate.fix linting] {Fore.RESET}Fixing formatting using pre-commit\"\n )\n call([\"pre-commit\", \"run\", \"--all-files\"])\n\n\ndef pre_release(version):\n \"\"\"Generates new docs, release announcements and creates a local tag.\"\"\"\n announce(version)\n regen()\n changelog(version, write_out=True)\n fix_formatting()\n\n msg = \"Preparing release version {}\".format(version)\n check_call([\"git\", \"commit\", \"-a\", \"-m\", msg])\n\n print()\n print(f\"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!\")\n print()\n print(f\"Please push your branch and open a PR.\")\n\n\ndef changelog(version, write_out=False):\n if write_out:\n addopts = []\n else:\n addopts = [\"--draft\"]\n check_call([\"towncrier\", \"--yes\", \"--version\", version] + addopts)\n\n\ndef main():\n init(autoreset=True)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"version\", help=\"Release version\")\n options = parser.parse_args()\n pre_release(options.version)\n\n\nif __name__ == \"__main__\":\n main()", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_from_setuptools_import_se_INSTALL_REQUIRES._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_from_setuptools_import_se_INSTALL_REQUIRES._", "embedding": null, "metadata": {"file_path": "setup.py", "file_name": "setup.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 17, "span_ids": ["imports", "docstring", "impl"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from setuptools import setup\n\n# TODO: if py gets upgrade to >=1.6,\n# remove _width_of_current_line in terminal.py\nINSTALL_REQUIRES = [\n \"py>=1.5.0\",\n \"six>=1.10.0\",\n \"setuptools\",\n \"attrs>=17.4.0\",\n 'more-itertools>=4.0.0,<6.0.0;python_version<=\"2.7\"',\n 'more-itertools>=4.0.0;python_version>\"2.7\"',\n \"atomicwrites>=1.0\",\n 'funcsigs>=1.0;python_version<\"3.0\"',\n 'pathlib2>=2.2.0;python_version<\"3.6\"',\n 'colorama;sys_platform==\"win32\"',\n \"pluggy>=0.9\",\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_main_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/setup.py_main_", "embedding": null, "metadata": {"file_path": "setup.py", "file_name": "setup.py", "file_type": "text/x-python", "category": "implementation", "start_line": 21, "end_line": 43, "span_ids": ["main", "impl:3"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def main():\n setup(\n use_scm_version={\"write_to\": \"src/_pytest/_version.py\"},\n setup_requires=[\"setuptools-scm\", \"setuptools>=40.0\"],\n package_dir={\"\": \"src\"},\n # fmt: off\n extras_require={\n \"testing\": [\n \"argcomplete\",\n \"hypothesis>=3.56\",\n \"nose\",\n \"requests\",\n \"mock;python_version=='2.7'\",\n ],\n },\n # fmt: on\n install_requires=INSTALL_REQUIRES,\n )\n\n\nif __name__ == \"__main__\":\n main()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/__init__.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/__init__.py__", "embedding": null, "metadata": {"file_path": "src/_pytest/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 9, "span_ids": ["impl"], "tokens": 56}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "__all__ = [\"__version__\"]\n\ntry:\n from ._version import version as __version__\nexcept ImportError:\n # broken installation, we don't even try\n # unknown only works because we do poor mans version compare\n __version__ = \"unknown\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py__allow_bash_completion__from_glob_import_glob": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py__allow_bash_completion__from_glob_import_glob", "embedding": null, "metadata": {"file_path": "src/_pytest/_argcomplete.py", "file_name": "_argcomplete.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 62, "span_ids": ["imports", "docstring"], "tokens": 559}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"allow bash-completion for argparse with argcomplete if installed\nneeds argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail\nto find the magic string, so _ARGCOMPLETE env. var is never set, and\nthis does not need special code.\n\nFunction try_argcomplete(parser) should be called directly before\nthe call to ArgumentParser.parse_args().\n\nThe filescompleter is what you normally would use on the positional\narguments specification, in order to get \"dirname/\" after \"dirn\"\ninstead of the default \"dirname \":\n\n optparser.add_argument(Config._file_or_dir, nargs='*'\n ).completer=filescompleter\n\nOther, application specific, completers should go in the file\ndoing the add_argument calls as they need to be specified as .completer\nattributes as well. 
(If argcomplete is not installed, the function the\nattribute points to will not be used).\n\nSPEEDUP\n=======\nThe generic argcomplete script for bash-completion\n(/etc/bash_completion.d/python-argcomplete.sh )\nuses a python program to determine startup script generated by pip.\nYou can speed up completion somewhat by changing this script to include\n # PYTHON_ARGCOMPLETE_OK\nso the the python-argcomplete-check-easy-install-script does not\nneed to be called to find the entry point of the code and see if that is\nmarked with PYTHON_ARGCOMPLETE_OK\n\nINSTALL/DEBUGGING\n=================\nTo include this support in another application that has setup.py generated\nscripts:\n- add the line:\n # PYTHON_ARGCOMPLETE_OK\n near the top of the main python entry point\n- include in the file calling parse_args():\n from _argcomplete import try_argcomplete, filescompleter\n , call try_argcomplete just before parse_args(), and optionally add\n filescompleter to the positional arguments' add_argument()\nIf things do not work right away:\n- switch on argcomplete debugging with (also helpful when doing custom\n completers):\n export _ARC_DEBUG=1\n- run:\n python-argcomplete-check-easy-install-script $(which appname)\n echo $?\n will echo 0 if the magic line has been found, 1 if not\n- sometimes it helps to find early on errors using:\n _ARGCOMPLETE=1 _ARC_DEBUG=1 appname\n which should throw a KeyError: 'COMPLINE' (which is properly set by the\n global argcomplete script).\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom glob import glob", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py_FastFilesCompleter_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_argcomplete.py_FastFilesCompleter_", "embedding": null, "metadata": {"file_path": "src/_pytest/_argcomplete.py", "file_name": "_argcomplete.py", "file_type": "text/x-python", "category": "implementation", "start_line": 65, "end_line": 110, "span_ids": ["impl", "FastFilesCompleter", "FastFilesCompleter.__call__"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FastFilesCompleter(object):\n \"Fast file completer class\"\n\n def __init__(self, directories=True):\n self.directories = directories\n\n def __call__(self, prefix, **kwargs):\n \"\"\"only called on non option completions\"\"\"\n if os.path.sep in prefix[1:]:\n prefix_dir = len(os.path.dirname(prefix) + os.path.sep)\n else:\n prefix_dir = 0\n completion = []\n globbed = []\n if \"*\" not in prefix and \"?\" not in prefix:\n # we are on unix, otherwise no bash\n if not prefix or prefix[-1] == os.path.sep:\n globbed.extend(glob(prefix + \".*\"))\n prefix += \"*\"\n globbed.extend(glob(prefix))\n for x in sorted(globbed):\n if os.path.isdir(x):\n x += \"/\"\n # append stripping the prefix (like bash, not like compgen)\n completion.append(x[prefix_dir:])\n return completion\n\n\nif os.environ.get(\"_ARGCOMPLETE\"):\n try:\n import argcomplete.completers\n except ImportError:\n 
sys.exit(-1)\n filescompleter = FastFilesCompleter()\n\n def try_argcomplete(parser):\n argcomplete.autocomplete(parser, always_complete_options=False)\n\n\nelse:\n\n def try_argcomplete(parser):\n pass\n\n filescompleter = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/__init__.py__python_inspection_cod_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/__init__.py__python_inspection_cod_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["impl", "imports:11", "docstring:3", "docstring", "imports:10", "docstring:4", "imports", "docstring:2"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" python inspection/code generation API \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .code import Code # noqa\nfrom .code import ExceptionInfo # noqa\nfrom .code import filter_traceback # noqa\nfrom .code import Frame # noqa\nfrom .code import getrawcode # noqa\nfrom .code import Traceback # noqa\nfrom .source import compile_ as compile # noqa\nfrom .source import getfslineno # noqa\nfrom .source import Source # noqa", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__copied_from_python_2_7__format_exception_only.return.lines": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__copied_from_python_2_7__format_exception_only.return.lines", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/_py2traceback.py", "file_name": "_py2traceback.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 73, "span_ids": ["imports", "docstring", "format_exception_only"], "tokens": 588}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# copied from python-2.7.3's traceback.py\n# CHANGES:\n# - some_str is replaced, trying to create unicode strings\n#\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport types\n\nfrom six import text_type\n\n\ndef format_exception_only(etype, value):\n \"\"\"Format the exception part of a traceback.\n\n The arguments are the exception type and value such as given by\n sys.last_type and sys.last_value. 
The return value is a list of\n strings, each ending in a newline.\n\n Normally, the list contains a single string; however, for\n SyntaxError exceptions, it contains several lines that (when\n printed) display detailed information about where the syntax\n error occurred.\n\n The message indicating which exception occurred is always the last\n string in the list.\n\n \"\"\"\n\n # An instance should not have a meaningful value parameter, but\n # sometimes does, particularly for string exceptions, such as\n # >>> raise string1, string2 # deprecated\n #\n # Clear these out first because issubtype(string1, SyntaxError)\n # would throw another exception and mask the original problem.\n if (\n isinstance(etype, BaseException)\n or isinstance(etype, types.InstanceType)\n or etype is None\n or type(etype) is str\n ):\n return [_format_final_exc_line(etype, value)]\n\n stype = etype.__name__\n\n if not issubclass(etype, SyntaxError):\n return [_format_final_exc_line(stype, value)]\n\n # It was a syntax error; show exactly where the problem was found.\n lines = []\n try:\n msg, (filename, lineno, offset, badline) = value.args\n except Exception:\n pass\n else:\n filename = filename or \"\"\n lines.append(' File \"{}\", line {}\\n'.format(filename, lineno))\n if badline is not None:\n if isinstance(badline, bytes): # python 2 only\n badline = badline.decode(\"utf-8\", \"replace\")\n lines.append(\" {}\\n\".format(badline.strip()))\n if offset is not None:\n caretspace = badline.rstrip(\"\\n\")[:offset].lstrip()\n # non-space whitespace (likes tabs) must be kept for alignment\n caretspace = ((c.isspace() and c or \" \") for c in caretspace)\n # only three spaces to account for offset1 == pos 0\n lines.append(\" {}^\\n\".format(\"\".join(caretspace)))\n value = msg\n\n lines.append(_format_final_exc_line(stype, value))\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__format_final_exc_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/_py2traceback.py__format_final_exc_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/_py2traceback.py", "file_name": "_py2traceback.py", "file_type": "text/x-python", "category": "implementation", "start_line": 76, "end_line": 95, "span_ids": ["_some_str", "_format_final_exc_line"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _format_final_exc_line(etype, value):\n \"\"\"Return a list of a single line -- normal case for format_exception_only\"\"\"\n valuestr = _some_str(value)\n if value is None or not valuestr:\n line = \"{}\\n\".format(etype)\n else:\n line = \"{}: {}\\n\".format(etype, valuestr)\n return line\n\n\ndef _some_str(value):\n try:\n return text_type(value)\n except Exception:\n try:\n return bytes(value).decode(\"UTF-8\", \"replace\")\n except Exception:\n pass\n return \"\".format(type(value).__name__)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": 
"1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_from___future___import_ab_if__PY3_.else_.format_exception_only": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_from___future___import_ab_if__PY3_.else_.format_exception_only", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["impl", "impl:2", "imports:24", "imports", "imports:23"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport re\nimport sys\nimport traceback\nfrom inspect import CO_VARARGS\nfrom inspect import CO_VARKEYWORDS\nfrom weakref import ref\n\nimport attr\nimport pluggy\nimport py\nimport six\nfrom six import text_type\n\nimport _pytest\nfrom _pytest._io.saferepr import safeformat\nfrom _pytest._io.saferepr import saferepr\nfrom _pytest.compat import _PY2\nfrom _pytest.compat import _PY3\nfrom _pytest.compat import PY35\nfrom _pytest.compat import safe_str\n\nif _PY3:\n from traceback import format_exception_only\nelse:\n from ._py2traceback import format_exception_only", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Code_Code.getargs.return.raw_co_varnames_argcount": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Code_Code.getargs.return.raw_co_varnames_argcount", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 32, "end_line": 99, "span_ids": ["Code.source", "Code.getargs", "Code.fullsource", "Code:3", "Code.path", "Code.__eq__", "Code.__ne__", "Code"], "tokens": 479}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Code(object):\n \"\"\" wrapper around Python code objects \"\"\"\n\n def __init__(self, rawcode):\n if not hasattr(rawcode, \"co_filename\"):\n rawcode = getrawcode(rawcode)\n try:\n self.filename = rawcode.co_filename\n self.firstlineno = rawcode.co_firstlineno - 1\n self.name = rawcode.co_name\n except AttributeError:\n raise TypeError(\"not a code object: %r\" % (rawcode,))\n self.raw = rawcode\n\n def __eq__(self, other):\n return self.raw == other.raw\n\n __hash__ = None\n\n def __ne__(self, other):\n return not self == other\n\n @property\n def path(self):\n \"\"\" return a path object pointing to source code (note that it\n might not point to an actually existing file). 
\"\"\"\n try:\n p = py.path.local(self.raw.co_filename)\n # maybe don't try this checking\n if not p.check():\n raise OSError(\"py.path check failed.\")\n except OSError:\n # XXX maybe try harder like the weird logic\n # in the standard lib [linecache.updatecache] does?\n p = self.raw.co_filename\n\n return p\n\n @property\n def fullsource(self):\n \"\"\" return a _pytest._code.Source object for the full source file of the code\n \"\"\"\n from _pytest._code import source\n\n full, _ = source.findsource(self.raw)\n return full\n\n def source(self):\n \"\"\" return a _pytest._code.Source object for the code object's source only\n \"\"\"\n # return source only for that part of code\n import _pytest._code\n\n return _pytest._code.Source(self.raw)\n\n def getargs(self, var=False):\n \"\"\" return a tuple with the argument names for the code object\n\n if 'var' is set True also return the names of the variable and\n keyword arguments when present\n \"\"\"\n # handfull shortcut for getting args\n raw = self.raw\n argcount = raw.co_argcount\n if var:\n argcount += raw.co_flags & CO_VARARGS\n argcount += raw.co_flags & CO_VARKEYWORDS\n return raw.co_varnames[:argcount]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Frame_Frame.getargs.return.retval": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Frame_Frame.getargs.return.retval", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 103, "end_line": 163, "span_ids": ["Frame", "Frame.getargs", "Frame.eval", "Frame.is_true", "Frame.repr", "Frame.statement", "Frame.exec_"], "tokens": 406}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Frame(object):\n \"\"\"Wrapper around a Python frame holding f_locals and f_globals\n in which expressions can be evaluated.\"\"\"\n\n def __init__(self, frame):\n self.lineno = frame.f_lineno - 1\n self.f_globals = frame.f_globals\n self.f_locals = frame.f_locals\n self.raw = frame\n self.code = Code(frame.f_code)\n\n @property\n def statement(self):\n \"\"\" statement this frame is at \"\"\"\n import _pytest._code\n\n if self.code.fullsource is None:\n return _pytest._code.Source(\"\")\n return self.code.fullsource.getstatement(self.lineno)\n\n def eval(self, code, **vars):\n \"\"\" evaluate 'code' in the frame\n\n 'vars' are optional additional local variables\n\n returns the result of the evaluation\n \"\"\"\n f_locals = self.f_locals.copy()\n f_locals.update(vars)\n return eval(code, self.f_globals, f_locals)\n\n def exec_(self, code, **vars):\n \"\"\" exec 'code' in the frame\n\n 'vars' are optiona; additional local variables\n \"\"\"\n f_locals = self.f_locals.copy()\n f_locals.update(vars)\n six.exec_(code, self.f_globals, f_locals)\n\n def repr(self, object):\n \"\"\" return a 'safe' (non-recursive, one-line) string repr for 'object'\n \"\"\"\n return saferepr(object)\n\n def is_true(self, object):\n return object\n\n def getargs(self, var=False):\n \"\"\" return a list 
of tuples (name, value) for all arguments\n\n if 'var' is set True also include the variable and keyword\n arguments when present\n \"\"\"\n retval = []\n for arg in self.code.getargs(var):\n try:\n retval.append((arg, self.f_locals[arg]))\n except KeyError:\n pass # this can occur when using Psyco\n return retval", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry_TracebackEntry.getfirstlinesource.return.max_self_frame_code_first": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry_TracebackEntry.getfirstlinesource.return.max_self_frame_code_first", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 165, "end_line": 211, "span_ids": ["TracebackEntry.relline", "TracebackEntry.getlocals", "TracebackEntry.path", "TracebackEntry:7", "TracebackEntry.statement", "TracebackEntry.getfirstlinesource", "TracebackEntry", "TracebackEntry.__repr__", "TracebackEntry.frame", "TracebackEntry.set_repr_style"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TracebackEntry(object):\n \"\"\" a single entry in a traceback \"\"\"\n\n _repr_style = None\n exprinfo = None\n\n def __init__(self, rawentry, excinfo=None):\n self._excinfo = excinfo\n self._rawentry = rawentry\n self.lineno = rawentry.tb_lineno - 1\n\n def set_repr_style(self, mode):\n assert mode in (\"short\", \"long\")\n self._repr_style = mode\n\n @property\n def frame(self):\n import _pytest._code\n\n return _pytest._code.Frame(self._rawentry.tb_frame)\n\n @property\n def relline(self):\n return self.lineno - self.frame.code.firstlineno\n\n def __repr__(self):\n return \"\" % (self.frame.code.path, self.lineno + 1)\n\n @property\n def statement(self):\n \"\"\" _pytest._code.Source object for the current statement \"\"\"\n source = self.frame.code.fullsource\n return source.getstatement(self.lineno)\n\n @property\n def path(self):\n \"\"\" path to the source code \"\"\"\n return self.frame.code.path\n\n def getlocals(self):\n return self.frame.f_locals\n\n locals = property(getlocals, None, None, \"locals of underlaying frame\")\n\n def getfirstlinesource(self):\n # on Jython this firstlineno can be -1 apparently\n return max(self.frame.code.firstlineno, 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.getsource_TracebackEntry.getsource.return.source_start_end_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.getsource_TracebackEntry.getsource.return.source_start_end_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 237, "span_ids": 
["TracebackEntry.getsource"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TracebackEntry(object):\n\n def getsource(self, astcache=None):\n \"\"\" return failing source code. \"\"\"\n # we use the passed in astcache to not reparse asttrees\n # within exception info printing\n from _pytest._code.source import getstatementrange_ast\n\n source = self.frame.code.fullsource\n if source is None:\n return None\n key = astnode = None\n if astcache is not None:\n key = self.frame.code.path\n if key is not None:\n astnode = astcache.get(key, None)\n start = self.getfirstlinesource()\n try:\n astnode, _, end = getstatementrange_ast(\n self.lineno, source, astnode=astnode\n )\n except SyntaxError:\n end = self.lineno + 1\n else:\n if key is not None:\n astcache[key] = astnode\n return source[start:end]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.source_TracebackEntry.ishidden.return.tbh": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.source_TracebackEntry.ishidden.return.tbh", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 239, "end_line": 256, "span_ids": ["TracebackEntry.ishidden", "TracebackEntry:9"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TracebackEntry(object):\n\n source = property(getsource)\n\n def ishidden(self):\n \"\"\" return True if the current frame has a var __tracebackhide__\n resolving to True.\n\n If __tracebackhide__ is a callable, it gets called with the\n ExceptionInfo instance and can decide whether to hide the traceback.\n\n mostly for internal use\n \"\"\"\n f = self.frame\n tbh = f.f_locals.get(\n \"__tracebackhide__\", f.f_globals.get(\"__tracebackhide__\", False)\n )\n if tbh and callable(tbh):\n return tbh(None if self._excinfo is None else self._excinfo())\n return tbh", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.__str___TracebackEntry.name.property_name_None_None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TracebackEntry.__str___TracebackEntry.name.property_name_None_None", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 275, "span_ids": ["TracebackEntry.__str__", "TracebackEntry:11", "TracebackEntry.name"], "tokens": 143}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TracebackEntry(object):\n\n def __str__(self):\n try:\n fn = str(self.path)\n except py.error.Error:\n fn = \"???\"\n name = self.frame.code.name\n try:\n line = str(self.statement).lstrip()\n except KeyboardInterrupt:\n raise\n except: # noqa\n line = \"???\"\n return \" File %r:%d in %s\\n %s\\n\" % (fn, self.lineno + 1, name, line)\n\n def name(self):\n return self.frame.code.raw.co_name\n\n name = property(name, None, None, \"co_name of underlaying code\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback_Traceback.__init__.if_hasattr_tb_tb_next_.else_.list___init___self_tb_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback_Traceback.__init__.if_hasattr_tb_tb_next_.else_.list___init___self_tb_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 297, "span_ids": ["Traceback.__init__.if_hasattr_tb_tb_next_.f", "Traceback"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Traceback(list):\n \"\"\" Traceback objects encapsulate and offer higher level\n access to Traceback entries.\n \"\"\"\n\n Entry = TracebackEntry\n\n def __init__(self, tb, excinfo=None):\n \"\"\" initialize from given python traceback object and ExceptionInfo \"\"\"\n self._excinfo = excinfo\n if hasattr(tb, \"tb_next\"):\n\n def f(cur):\n while cur is not None:\n yield self.Entry(cur, excinfo=excinfo)\n cur = cur.tb_next\n\n list.__init__(self, f(tb))\n else:\n list.__init__(self, tb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.cut_Traceback.cut.return.self": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.cut_Traceback.cut.return.self", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 323, "span_ids": ["Traceback.cut"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Traceback(list):\n\n def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):\n \"\"\" return a Traceback instance wrapping part of this 
Traceback\n\n by provding any combination of path, lineno and firstlineno, the\n first frame to start the to-be-returned traceback is determined\n\n this allows cutting the first part of a Traceback instance e.g.\n for formatting reasons (removing some uninteresting bits that deal\n with handling of the exception/traceback)\n \"\"\"\n for x in self:\n code = x.frame.code\n codepath = code.path\n if (\n (path is None or codepath == path)\n and (\n excludepath is None\n or not hasattr(codepath, \"relto\")\n or not codepath.relto(excludepath)\n )\n and (lineno is None or x.lineno == lineno)\n and (firstlineno is None or x.frame.code.firstlineno == firstlineno)\n ):\n return Traceback(x._rawentry, self._excinfo)\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.__getitem___Traceback.getcrashentry.return.self_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.__getitem___Traceback.getcrashentry.return.self_1_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 325, "end_line": 351, "span_ids": ["Traceback.__getitem__", "Traceback.getcrashentry", "Traceback.filter"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Traceback(list):\n\n def __getitem__(self, key):\n val = super(Traceback, self).__getitem__(key)\n if isinstance(key, type(slice(0))):\n val = self.__class__(val)\n return val\n\n def filter(self, fn=lambda x: not x.ishidden()):\n \"\"\" return a Traceback instance with certain items removed\n\n fn is a function that gets a single argument, a TracebackEntry\n instance, and should return True when the item should be added\n to the Traceback, False when not\n\n by default this removes all the TracebackEntries which are hidden\n (see ishidden() above)\n \"\"\"\n return Traceback(filter(fn, self), self._excinfo)\n\n def getcrashentry(self):\n \"\"\" return last non-hidden traceback entry that lead\n to the exception of a traceback.\n \"\"\"\n for i in range(-1, -len(self) - 1, -1):\n entry = self[i]\n if not entry.ishidden():\n return entry\n return self[-1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.recursionindex_co_equal.compile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_Traceback.recursionindex_co_equal.compile_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 353, "end_line": 384, "span_ids": ["Traceback.recursionindex", "impl:3"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Traceback(list):\n\n def recursionindex(self):\n \"\"\" return the index of the frame/TracebackEntry where recursion\n originates if appropriate, None if no recursion occurred\n \"\"\"\n cache = {}\n for i, entry in enumerate(self):\n # id for the code.raw is needed to work around\n # the strange metaprogramming in the decorator lib from pypi\n # which generates code objects that have hash/value equality\n # XXX needs a test\n key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno\n # print \"checking for recursion at\", key\n values = cache.setdefault(key, [])\n if values:\n f = entry.frame\n loc = f.f_locals\n for otherloc in values:\n if f.is_true(\n f.eval(\n co_equal,\n __recursioncache_locals_1=loc,\n __recursioncache_locals_2=otherloc,\n )\n ):\n return i\n values.append(entry.frame.f_locals)\n return None\n\n\nco_equal = compile(\n \"__recursioncache_locals_1 == __recursioncache_locals_2\", \"?\", \"eval\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo_ExceptionInfo.from_current.return.cls_tup__striptext_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo_ExceptionInfo.from_current.return.cls_tup__striptext_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 387, "end_line": 424, "span_ids": ["ExceptionInfo.from_current", "ExceptionInfo"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass ExceptionInfo(object):\n \"\"\" wraps sys.exc_info() objects and offers\n help for navigating the traceback.\n \"\"\"\n\n _assert_start_repr = (\n \"AssertionError(u'assert \" if _PY2 else \"AssertionError('assert \"\n )\n\n _excinfo = attr.ib()\n _striptext = attr.ib(default=\"\")\n _traceback = attr.ib(default=None)\n\n @classmethod\n def from_current(cls, exprinfo=None):\n \"\"\"returns an ExceptionInfo matching the current traceback\n\n .. 
warning::\n\n Experimental API\n\n\n :param exprinfo: a text string helping to determine if we should\n strip ``AssertionError`` from the output, defaults\n to the exception message/``__str__()``\n \"\"\"\n tup = sys.exc_info()\n assert tup[0] is not None, \"no current exception\"\n _striptext = \"\"\n if exprinfo is None and isinstance(tup[1], AssertionError):\n exprinfo = getattr(tup[1], \"msg\", None)\n if exprinfo is None:\n exprinfo = saferepr(tup[1])\n if exprinfo and exprinfo.startswith(cls._assert_start_repr):\n _striptext = \"AssertionError: \"\n\n return cls(tup, _striptext)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.for_later_ExceptionInfo.__repr__.return._ExceptionInfo_s_tblen_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.for_later_ExceptionInfo.__repr__.return._ExceptionInfo_s_tblen_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 466, "span_ids": ["ExceptionInfo.traceback", "ExceptionInfo.traceback_7", "ExceptionInfo.__repr__", "ExceptionInfo.typename", "ExceptionInfo.value", "ExceptionInfo.for_later", "ExceptionInfo.tb", "ExceptionInfo.type"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass ExceptionInfo(object):\n\n @classmethod\n def for_later(cls):\n \"\"\"return an unfilled ExceptionInfo\n \"\"\"\n return cls(None)\n\n @property\n def type(self):\n \"\"\"the exception class\"\"\"\n return self._excinfo[0]\n\n @property\n def value(self):\n \"\"\"the exception value\"\"\"\n return self._excinfo[1]\n\n @property\n def tb(self):\n \"\"\"the exception raw traceback\"\"\"\n return self._excinfo[2]\n\n @property\n def typename(self):\n \"\"\"the type name of the exception\"\"\"\n return self.type.__name__\n\n @property\n def traceback(self):\n \"\"\"the traceback\"\"\"\n if self._traceback is None:\n self._traceback = Traceback(self.tb, excinfo=ref(self))\n return self._traceback\n\n @traceback.setter\n def traceback(self, value):\n self._traceback = value\n\n def __repr__(self):\n if self._excinfo is None:\n return \"\"\n return \"\" % (self.typename, len(self.traceback))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.exconly_ExceptionInfo._getreprcrash.return.ReprFileLocation_path_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.exconly_ExceptionInfo._getreprcrash.return.ReprFileLocation_path_li", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 492, "span_ids": ["ExceptionInfo._getreprcrash", "ExceptionInfo.exconly", 
"ExceptionInfo.errisinstance"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass ExceptionInfo(object):\n\n def exconly(self, tryshort=False):\n \"\"\" return the exception as a string\n\n when 'tryshort' resolves to True, and the exception is a\n _pytest._code._AssertionError, only the actual exception part of\n the exception representation is returned (so 'AssertionError: ' is\n removed from the beginning)\n \"\"\"\n lines = format_exception_only(self.type, self.value)\n text = \"\".join(lines)\n text = text.rstrip()\n if tryshort:\n if text.startswith(self._striptext):\n text = text[len(self._striptext) :]\n return text\n\n def errisinstance(self, exc):\n \"\"\" return True if the exception is an instance of exc \"\"\"\n return isinstance(self.value, exc)\n\n def _getreprcrash(self):\n exconly = self.exconly(tryshort=True)\n entry = self.traceback.getcrashentry()\n path, lineno = entry.frame.code.raw.co_filename, entry.lineno\n return ReprFileLocation(path, lineno + 1, exconly)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.getrepr_ExceptionInfo.getrepr.return.fmt_repr_excinfo_self_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.getrepr_ExceptionInfo.getrepr.return.fmt_repr_excinfo_self_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 494, "end_line": 551, "span_ids": ["ExceptionInfo.getrepr"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass ExceptionInfo(object):\n\n def getrepr(\n self,\n showlocals=False,\n style=\"long\",\n abspath=False,\n tbfilter=True,\n funcargs=False,\n truncate_locals=True,\n chain=True,\n ):\n \"\"\"\n Return str()able representation of this exception info.\n\n :param bool showlocals:\n Show locals per traceback entry.\n Ignored if ``style==\"native\"``.\n\n :param str style: long|short|no|native traceback style\n\n :param bool abspath:\n If paths should be changed to absolute or left unchanged.\n\n :param bool tbfilter:\n Hide entries that contain a local variable ``__tracebackhide__==True``.\n Ignored if ``style==\"native\"``.\n\n :param bool funcargs:\n Show fixtures (\"funcargs\" for legacy purposes) per traceback entry.\n\n :param bool truncate_locals:\n With ``showlocals==True``, make sure locals can be safely represented as strings.\n\n :param bool chain: if chained exceptions in Python 3 should be shown.\n\n .. 
versionchanged:: 3.9\n\n Added the ``chain`` parameter.\n \"\"\"\n if style == \"native\":\n return ReprExceptionInfo(\n ReprTracebackNative(\n traceback.format_exception(\n self.type, self.value, self.traceback[0]._rawentry\n )\n ),\n self._getreprcrash(),\n )\n\n fmt = FormattedExcinfo(\n showlocals=showlocals,\n style=style,\n abspath=abspath,\n tbfilter=tbfilter,\n funcargs=funcargs,\n truncate_locals=truncate_locals,\n chain=chain,\n )\n return fmt.repr_excinfo(self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.__str___ExceptionInfo.match.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionInfo.__str___ExceptionInfo.match.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 553, "end_line": 575, "span_ids": ["ExceptionInfo.match", "ExceptionInfo.__unicode__", "ExceptionInfo.__str__"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass ExceptionInfo(object):\n\n def __str__(self):\n if self._excinfo is None:\n return repr(self)\n entry = self.traceback[-1]\n loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())\n return str(loc)\n\n def __unicode__(self):\n entry = self.traceback[-1]\n loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())\n return text_type(loc)\n\n def match(self, regexp):\n \"\"\"\n Match the regular expression 'regexp' on the string representation of\n the exception. If it matches then True is returned (so that it is\n possible to write 'assert excinfo.match()'). 
If it doesn't match an\n AssertionError is raised.\n \"\"\"\n __tracebackhide__ = True\n if not re.search(regexp, str(self.value)):\n assert 0, \"Pattern '{!s}' not found in '{!s}'\".format(regexp, self.value)\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo_FormattedExcinfo.repr_args.if_self_funcargs_.return.ReprFuncArgs_args_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo_FormattedExcinfo.repr_args.if_self_funcargs_.return.ReprFuncArgs_args_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 578, "end_line": 621, "span_ids": ["FormattedExcinfo.repr_args", "FormattedExcinfo", "FormattedExcinfo._getindent", "FormattedExcinfo._getentrysource"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n \"\"\" presenting information about failing Functions and Generators. \"\"\"\n\n # for traceback entries\n flow_marker = \">\"\n fail_marker = \"E\"\n\n showlocals = attr.ib(default=False)\n style = attr.ib(default=\"long\")\n abspath = attr.ib(default=True)\n tbfilter = attr.ib(default=True)\n funcargs = attr.ib(default=False)\n truncate_locals = attr.ib(default=True)\n chain = attr.ib(default=True)\n astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False)\n\n def _getindent(self, source):\n # figure out indent for given source\n try:\n s = str(source.getstatement(len(source) - 1))\n except KeyboardInterrupt:\n raise\n except: # noqa\n try:\n s = str(source[-1])\n except KeyboardInterrupt:\n raise\n except: # noqa\n return 0\n return 4 + (len(s) - len(s.lstrip()))\n\n def _getentrysource(self, entry):\n source = entry.getsource(self.astcache)\n if source is not None:\n source = source.deindent()\n return source\n\n def repr_args(self, entry):\n if self.funcargs:\n args = []\n for argname, argvalue in entry.frame.getargs(var=True):\n args.append((argname, saferepr(argvalue)))\n return ReprFuncArgs(args)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.get_source_FormattedExcinfo.get_exconly.return.lines": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.get_source_FormattedExcinfo.get_exconly.return.lines", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 623, "end_line": 657, "span_ids": ["FormattedExcinfo.get_source", "FormattedExcinfo.get_exconly"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def get_source(self, source, line_index=-1, excinfo=None, short=False):\n \"\"\" return formatted and marked up source lines. \"\"\"\n import _pytest._code\n\n lines = []\n if source is None or line_index >= len(source.lines):\n source = _pytest._code.Source(\"???\")\n line_index = 0\n if line_index < 0:\n line_index += len(source)\n space_prefix = \" \"\n if short:\n lines.append(space_prefix + source.lines[line_index].strip())\n else:\n for line in source.lines[:line_index]:\n lines.append(space_prefix + line)\n lines.append(self.flow_marker + \" \" + source.lines[line_index])\n for line in source.lines[line_index + 1 :]:\n lines.append(space_prefix + line)\n if excinfo is not None:\n indent = 4 if short else self._getindent(source)\n lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))\n return lines\n\n def get_exconly(self, excinfo, indent=4, markall=False):\n lines = []\n indent = \" \" * indent\n # get the real exception information out\n exlines = excinfo.exconly(tryshort=True).split(\"\\n\")\n failindent = self.fail_marker + indent[1:]\n for line in exlines:\n lines.append(failindent + line)\n if not markall:\n failindent = indent\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_locals_FormattedExcinfo.repr_locals.if_self_showlocals_.return.ReprLocals_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_locals_FormattedExcinfo.repr_locals.if_self_showlocals_.return.ReprLocals_lines_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 659, "end_line": 683, "span_ids": ["FormattedExcinfo.repr_locals"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def repr_locals(self, locals):\n if self.showlocals:\n lines = []\n keys = [loc for loc in locals if loc[0] != \"@\"]\n keys.sort()\n for name in keys:\n value = locals[name]\n if name == \"__builtins__\":\n lines.append(\"__builtins__ = \")\n else:\n # This formatting could all be handled by the\n # _repr() function, which is only reprlib.Repr in\n # disguise, so is very configurable.\n if self.truncate_locals:\n str_repr = saferepr(value)\n else:\n str_repr = safeformat(value)\n # if len(str_repr) < 70 or not isinstance(value,\n # (list, tuple, dict)):\n lines.append(\"%-10s = %s\" % (name, str_repr))\n # else:\n # self._line(\"%-10s =\\\\\" % (name,))\n # # XXX\n # pprint.pprint(value, stream=self.excinfowriter)\n return ReprLocals(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_traceback_entry_FormattedExcinfo.repr_traceback_entry.return.ReprEntry_lines_None_No": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_traceback_entry_FormattedExcinfo.repr_traceback_entry.return.ReprEntry_lines_None_No", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 685, "end_line": 717, "span_ids": ["FormattedExcinfo.repr_traceback_entry"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def repr_traceback_entry(self, entry, excinfo=None):\n import _pytest._code\n\n source = self._getentrysource(entry)\n if source is None:\n source = _pytest._code.Source(\"???\")\n line_index = 0\n else:\n # entry.getfirstlinesource() can be -1, should be 0 on jython\n line_index = entry.lineno - max(entry.getfirstlinesource(), 0)\n\n lines = []\n style = entry._repr_style\n if style is None:\n style = self.style\n if style in (\"short\", \"long\"):\n short = style == \"short\"\n reprargs = self.repr_args(entry) if not short else None\n s = self.get_source(source, line_index, excinfo, short=short)\n lines.extend(s)\n if short:\n message = \"in %s\" % (entry.name)\n else:\n message = excinfo and excinfo.typename or \"\"\n path = self._makepath(entry.path)\n filelocrepr = ReprFileLocation(path, entry.lineno + 1, message)\n localsrepr = None\n if not short:\n localsrepr = self.repr_locals(entry.locals)\n return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)\n if excinfo:\n lines.extend(self.get_exconly(excinfo, indent=4))\n return ReprEntry(lines, None, None, None, style)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._makepath_FormattedExcinfo.repr_traceback.return.ReprTraceback_entries_ex": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._makepath_FormattedExcinfo.repr_traceback.return.ReprTraceback_entries_ex", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 719, "end_line": 745, "span_ids": ["FormattedExcinfo.repr_traceback", "FormattedExcinfo._makepath"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def _makepath(self, path):\n if not self.abspath:\n try:\n np = py.path.local().bestrelpath(path)\n except OSError:\n return path\n if len(np) < len(str(path)):\n path = np\n return path\n\n def repr_traceback(self, excinfo):\n traceback = 
excinfo.traceback\n if self.tbfilter:\n traceback = traceback.filter()\n\n if is_recursion_error(excinfo):\n traceback, extraline = self._truncate_recursive_traceback(traceback)\n else:\n extraline = None\n\n last = traceback[-1]\n entries = []\n for index, entry in enumerate(traceback):\n einfo = (last == entry) and excinfo or None\n reprentry = self.repr_traceback_entry(entry, einfo)\n entries.append(reprentry)\n return ReprTraceback(entries, extraline, style=self.style)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._truncate_recursive_traceback_FormattedExcinfo._truncate_recursive_traceback.return.traceback_extraline": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo._truncate_recursive_traceback_FormattedExcinfo._truncate_recursive_traceback.return.traceback_extraline", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 747, "end_line": 782, "span_ids": ["FormattedExcinfo._truncate_recursive_traceback"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def _truncate_recursive_traceback(self, traceback):\n \"\"\"\n Truncate the given recursive traceback trying to find the starting point\n of the recursion.\n\n The detection is done by going through each traceback entry and finding the\n point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``.\n\n Handle the situation where the recursion process might raise an exception (for example\n comparing numpy arrays using equality raises a TypeError), in which case we do our best to\n warn the user of the error and show a limited traceback.\n \"\"\"\n try:\n recursionindex = traceback.recursionindex()\n except Exception as e:\n max_frames = 10\n extraline = (\n \"!!! Recursion error detected, but an error occurred locating the origin of recursion.\\n\"\n \" The following exception happened when comparing locals in the stack frame:\\n\"\n \" {exc_type}: {exc_msg}\\n\"\n \" Displaying first and last {max_frames} stack frames out of {total}.\"\n ).format(\n exc_type=type(e).__name__,\n exc_msg=safe_str(e),\n max_frames=max_frames,\n total=len(traceback),\n )\n traceback = traceback[:max_frames] + traceback[-max_frames:]\n else:\n if recursionindex is not None:\n extraline = \"!!! 
Recursion detected (same locals & position)\"\n traceback = traceback[: recursionindex + 1]\n else:\n extraline = None\n\n return traceback, extraline", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_excinfo_FormattedExcinfo.repr_excinfo.if__PY2_.else_.return.ExceptionChainRepr_repr_c": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_FormattedExcinfo.repr_excinfo_FormattedExcinfo.repr_excinfo.if__PY2_.else_.return.ExceptionChainRepr_repr_c", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 784, "end_line": 832, "span_ids": ["FormattedExcinfo.repr_excinfo"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass FormattedExcinfo(object):\n\n def repr_excinfo(self, excinfo):\n if _PY2:\n reprtraceback = self.repr_traceback(excinfo)\n reprcrash = excinfo._getreprcrash()\n\n return ReprExceptionInfo(reprtraceback, reprcrash)\n else:\n repr_chain = []\n e = excinfo.value\n descr = None\n seen = set()\n while e is not None and id(e) not in seen:\n seen.add(id(e))\n if excinfo:\n reprtraceback = self.repr_traceback(excinfo)\n reprcrash = excinfo._getreprcrash()\n else:\n # fallback to native repr if the exception doesn't have a traceback:\n # ExceptionInfo objects require a full traceback to work\n reprtraceback = ReprTracebackNative(\n traceback.format_exception(type(e), e, None)\n )\n reprcrash = None\n\n repr_chain += [(reprtraceback, reprcrash, descr)]\n if e.__cause__ is not None and self.chain:\n e = e.__cause__\n excinfo = (\n ExceptionInfo((type(e), e, e.__traceback__))\n if e.__traceback__\n else None\n )\n descr = \"The above exception was the direct cause of the following exception:\"\n elif (\n e.__context__ is not None\n and not e.__suppress_context__\n and self.chain\n ):\n e = e.__context__\n excinfo = (\n ExceptionInfo((type(e), e, e.__traceback__))\n if e.__traceback__\n else None\n )\n descr = \"During handling of the above exception, another exception occurred:\"\n else:\n e = None\n repr_chain.reverse()\n return ExceptionChainRepr(repr_chain)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TerminalRepr_ExceptionRepr.toterminal.for_name_content_sep_in.tw_line_content_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_TerminalRepr_ExceptionRepr.toterminal.for_name_content_sep_in.tw_line_content_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 835, "end_line": 864, "span_ids": ["TerminalRepr.__repr__", "ExceptionRepr.addsection", "ExceptionRepr", "TerminalRepr.__str__", "ExceptionRepr.toterminal", "TerminalRepr", 
"TerminalRepr.__unicode__"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalRepr(object):\n def __str__(self):\n s = self.__unicode__()\n if _PY2:\n s = s.encode(\"utf-8\")\n return s\n\n def __unicode__(self):\n # FYI this is called from pytest-xdist's serialization of exception\n # information.\n io = py.io.TextIO()\n tw = py.io.TerminalWriter(file=io)\n self.toterminal(tw)\n return io.getvalue().strip()\n\n def __repr__(self):\n return \"<%s instance at %0x>\" % (self.__class__, id(self))\n\n\nclass ExceptionRepr(TerminalRepr):\n def __init__(self):\n self.sections = []\n\n def addsection(self, name, content, sep=\"-\"):\n self.sections.append((name, content, sep))\n\n def toterminal(self, tw):\n for name, content, sep in self.sections:\n tw.sep(sep, name)\n tw.line(content)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionChainRepr_ReprExceptionInfo.toterminal.super_ReprExceptionInfo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ExceptionChainRepr_ReprExceptionInfo.toterminal.super_ReprExceptionInfo_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 867, "end_line": 893, "span_ids": ["ReprExceptionInfo", "ExceptionChainRepr.toterminal", "ReprExceptionInfo.toterminal", "ExceptionChainRepr"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ExceptionChainRepr(ExceptionRepr):\n def __init__(self, chain):\n super(ExceptionChainRepr, self).__init__()\n self.chain = chain\n # reprcrash and reprtraceback of the outermost (the newest) exception\n # in the chain\n self.reprtraceback = chain[-1][0]\n self.reprcrash = chain[-1][1]\n\n def toterminal(self, tw):\n for element in self.chain:\n element[0].toterminal(tw)\n if element[2] is not None:\n tw.line(\"\")\n tw.line(element[2], yellow=True)\n super(ExceptionChainRepr, self).toterminal(tw)\n\n\nclass ReprExceptionInfo(ExceptionRepr):\n def __init__(self, reprtraceback, reprcrash):\n super(ReprExceptionInfo, self).__init__()\n self.reprtraceback = reprtraceback\n self.reprcrash = reprcrash\n\n def toterminal(self, tw):\n self.reprtraceback.toterminal(tw)\n super(ReprExceptionInfo, self).toterminal(tw)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprTraceback_ReprEntryNative.toterminal.tw_write_join_self_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprTraceback_ReprEntryNative.toterminal.tw_write_join_self_lin", 
"embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 896, "end_line": 937, "span_ids": ["ReprTraceback.toterminal", "ReprTracebackNative", "ReprTraceback", "ReprEntryNative", "ReprEntryNative.toterminal"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ReprTraceback(TerminalRepr):\n entrysep = \"_ \"\n\n def __init__(self, reprentries, extraline, style):\n self.reprentries = reprentries\n self.extraline = extraline\n self.style = style\n\n def toterminal(self, tw):\n # the entries might have different styles\n for i, entry in enumerate(self.reprentries):\n if entry.style == \"long\":\n tw.line(\"\")\n entry.toterminal(tw)\n if i < len(self.reprentries) - 1:\n next_entry = self.reprentries[i + 1]\n if (\n entry.style == \"long\"\n or entry.style == \"short\"\n and next_entry.style == \"long\"\n ):\n tw.sep(self.entrysep)\n\n if self.extraline:\n tw.line(self.extraline)\n\n\nclass ReprTracebackNative(ReprTraceback):\n def __init__(self, tblines):\n self.style = \"native\"\n self.reprentries = [ReprEntryNative(tblines)]\n self.extraline = None\n\n\nclass ReprEntryNative(TerminalRepr):\n style = \"native\"\n\n def __init__(self, tblines):\n self.lines = tblines\n\n def toterminal(self, tw):\n tw.write(\"\".join(self.lines))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprEntry_ReprEntry.__str__.return._s_n_s_n_s_n_join": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprEntry_ReprEntry.__str__.return._s_n_s_n_s_n_join", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 940, "end_line": 970, "span_ids": ["ReprEntry.toterminal", "ReprEntry.__str__", "ReprEntry"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ReprEntry(TerminalRepr):\n def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):\n self.lines = lines\n self.reprfuncargs = reprfuncargs\n self.reprlocals = reprlocals\n self.reprfileloc = filelocrepr\n self.style = style\n\n def toterminal(self, tw):\n if self.style == \"short\":\n self.reprfileloc.toterminal(tw)\n for line in self.lines:\n red = line.startswith(\"E \")\n tw.line(line, bold=True, red=red)\n # tw.line(\"\")\n return\n if self.reprfuncargs:\n self.reprfuncargs.toterminal(tw)\n for line in self.lines:\n red = line.startswith(\"E \")\n tw.line(line, bold=True, red=red)\n if self.reprlocals:\n tw.line(\"\")\n self.reprlocals.toterminal(tw)\n if self.reprfileloc:\n if self.lines:\n tw.line(\"\")\n self.reprfileloc.toterminal(tw)\n\n def __str__(self):\n return \"%s\\n%s\\n%s\" % 
(\"\\n\".join(self.lines), self.reprlocals, self.reprfileloc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFileLocation_ReprLocals.toterminal.for_line_in_self_lines_.tw_line_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFileLocation_ReprLocals.toterminal.for_line_in_self_lines_.tw_line_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 973, "end_line": 996, "span_ids": ["ReprLocals", "ReprFileLocation", "ReprLocals.toterminal", "ReprFileLocation.toterminal"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ReprFileLocation(TerminalRepr):\n def __init__(self, path, lineno, message):\n self.path = str(path)\n self.lineno = lineno\n self.message = message\n\n def toterminal(self, tw):\n # filename and lineno output for each entry,\n # using an output format that most editors unterstand\n msg = self.message\n i = msg.find(\"\\n\")\n if i != -1:\n msg = msg[:i]\n tw.write(self.path, bold=True, red=True)\n tw.line(\":%s: %s\" % (self.lineno, msg))\n\n\nclass ReprLocals(TerminalRepr):\n def __init__(self, lines):\n self.lines = lines\n\n def toterminal(self, tw):\n for line in self.lines:\n tw.line(line)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFuncArgs_ReprFuncArgs.toterminal.if_self_args_.tw_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_ReprFuncArgs_ReprFuncArgs.toterminal.if_self_args_.tw_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 999, "end_line": 1019, "span_ids": ["ReprFuncArgs.toterminal", "ReprFuncArgs"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ReprFuncArgs(TerminalRepr):\n def __init__(self, args):\n self.args = args\n\n def toterminal(self, tw):\n if self.args:\n linesofar = \"\"\n for name, value in self.args:\n ns = \"%s = %s\" % (safe_str(name), safe_str(value))\n if len(ns) + len(linesofar) + 2 > tw.fullwidth:\n if linesofar:\n tw.line(linesofar)\n linesofar = ns\n else:\n if linesofar:\n linesofar += \", \" + ns\n else:\n linesofar = ns\n if linesofar:\n tw.line(linesofar)\n tw.line(\"\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_getrawcode_getrawcode.try_.except_AttributeError_.return.obj": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_getrawcode_getrawcode.try_.except_AttributeError_.return.obj", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1022, "end_line": 1036, "span_ids": ["getrawcode"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getrawcode(obj, trycall=True):\n \"\"\" return code object for given function. \"\"\"\n try:\n return obj.__code__\n except AttributeError:\n obj = getattr(obj, \"im_func\", obj)\n obj = getattr(obj, \"func_code\", obj)\n obj = getattr(obj, \"f_code\", obj)\n obj = getattr(obj, \"__code__\", obj)\n if trycall and not hasattr(obj, \"co_firstlineno\"):\n if hasattr(obj, \"__call__\") and not inspect.isclass(obj):\n x = getrawcode(obj.__call__, trycall=False)\n if hasattr(x, \"co_firstlineno\"):\n return x\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_if_PY35_RecursionErro__PY_DIR.py_path_local_py___file__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_if_PY35_RecursionErro__PY_DIR.py_path_local_py___file__", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1039, "end_line": 1066, "span_ids": ["impl:5"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "if PY35: # RecursionError introduced in 3.5\n\n def is_recursion_error(excinfo):\n return excinfo.errisinstance(RecursionError) # noqa\n\n\nelse:\n\n def is_recursion_error(excinfo):\n if not excinfo.errisinstance(RuntimeError):\n return False\n try:\n return \"maximum recursion depth exceeded\" in str(excinfo.value)\n except UnicodeError:\n return False\n\n\n# relative paths that we use to filter traceback entries from appearing to the user;\n# see filter_traceback\n# note: if we need to add more paths than what we have now we should probably use a list\n# for better maintenance\n\n_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip(\"oc\"))\n# pluggy is either a package or a single module depending on the version\nif _PLUGGY_DIR.basename == \"__init__.py\":\n _PLUGGY_DIR = _PLUGGY_DIR.dirpath()\n_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath()\n_PY_DIR = py.path.local(py.__file__).dirpath()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_filter_traceback_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/code.py_filter_traceback_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/code.py", "file_name": "code.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1069, "end_line": 1087, "span_ids": ["filter_traceback"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def filter_traceback(entry):\n \"\"\"Return True if a TracebackEntry instance should be removed from tracebacks:\n * dynamically generated code (no code to show up for it);\n * internal traceback from pytest or its internal libraries, py and pluggy.\n \"\"\"\n # entry.path might sometimes return a str object when the entry\n # points to dynamically generated code\n # see https://bitbucket.org/pytest-dev/py/issues/71\n raw_filename = entry.frame.code.raw.co_filename\n is_generated = \"<\" in raw_filename and \">\" in raw_filename\n if is_generated:\n return False\n # entry.path might point to a non-existing file, in which case it will\n # also return a str object. see #1133\n p = py.path.local(entry.path)\n return (\n not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_from___future___import_ab_Source.__str__.return._n_join_self_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_from___future___import_ab_Source.__str__.return._n_join_self_lines_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 142, "span_ids": ["Source.__str__", "Source.indent", "Source:5", "Source.__eq__", "Source.strip", "Source.getstatementrange", "Source", "Source.getstatement", "Source.__getitem__", "imports", "Source.__len__", "Source.deindent", "Source.isparseable", "Source.putaround"], "tokens": 923}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\nimport inspect\nimport linecache\nimport sys\nimport textwrap\nimport tokenize\nimport warnings\nfrom ast import PyCF_ONLY_AST as _AST_FLAG\nfrom bisect import bisect_right\n\nimport py\nimport six\n\n\nclass Source(object):\n \"\"\" an immutable object holding a source code fragment,\n possibly deindenting it.\n \"\"\"\n\n _compilecounter = 0\n\n def __init__(self, *parts, **kwargs):\n self.lines = lines = []\n de = kwargs.get(\"deindent\", True)\n for part in parts:\n if not part:\n partlines = []\n elif isinstance(part, Source):\n partlines = 
part.lines\n elif isinstance(part, (tuple, list)):\n partlines = [x.rstrip(\"\\n\") for x in part]\n elif isinstance(part, six.string_types):\n partlines = part.split(\"\\n\")\n else:\n partlines = getsource(part, deindent=de).lines\n if de:\n partlines = deindent(partlines)\n lines.extend(partlines)\n\n def __eq__(self, other):\n try:\n return self.lines == other.lines\n except AttributeError:\n if isinstance(other, str):\n return str(self) == other\n return False\n\n __hash__ = None\n\n def __getitem__(self, key):\n if isinstance(key, int):\n return self.lines[key]\n else:\n if key.step not in (None, 1):\n raise IndexError(\"cannot slice a Source with a step\")\n newsource = Source()\n newsource.lines = self.lines[key.start : key.stop]\n return newsource\n\n def __len__(self):\n return len(self.lines)\n\n def strip(self):\n \"\"\" return new source object with trailing\n and leading blank lines removed.\n \"\"\"\n start, end = 0, len(self)\n while start < end and not self.lines[start].strip():\n start += 1\n while end > start and not self.lines[end - 1].strip():\n end -= 1\n source = Source()\n source.lines[:] = self.lines[start:end]\n return source\n\n def putaround(self, before=\"\", after=\"\", indent=\" \" * 4):\n \"\"\" return a copy of the source object with\n 'before' and 'after' wrapped around it.\n \"\"\"\n before = Source(before)\n after = Source(after)\n newsource = Source()\n lines = [(indent + line) for line in self.lines]\n newsource.lines = before.lines + lines + after.lines\n return newsource\n\n def indent(self, indent=\" \" * 4):\n \"\"\" return a copy of the source object with\n all lines indented by the given indent-string.\n \"\"\"\n newsource = Source()\n newsource.lines = [(indent + line) for line in self.lines]\n return newsource\n\n def getstatement(self, lineno):\n \"\"\" return Source statement which contains the\n given linenumber (counted from 0).\n \"\"\"\n start, end = self.getstatementrange(lineno)\n return self[start:end]\n\n def getstatementrange(self, lineno):\n \"\"\" return (start, end) tuple which spans the minimal\n statement region which containing the given lineno.\n \"\"\"\n if not (0 <= lineno < len(self)):\n raise IndexError(\"lineno out of range\")\n ast, start, end = getstatementrange_ast(lineno, self)\n return start, end\n\n def deindent(self):\n \"\"\"return a new source object deindented.\"\"\"\n newsource = Source()\n newsource.lines[:] = deindent(self.lines)\n return newsource\n\n def isparseable(self, deindent=True):\n \"\"\" return True if source is parseable, heuristically\n deindenting it by default.\n \"\"\"\n from parser import suite as syntax_checker\n\n if deindent:\n source = str(self.deindent())\n else:\n source = str(self)\n try:\n # compile(source+'\\n', \"x\", \"exec\")\n syntax_checker(source + \"\\n\")\n except KeyboardInterrupt:\n raise\n except Exception:\n return False\n else:\n return True\n\n def __str__(self):\n return \"\\n\".join(self.lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_Source.compile_Source.compile.try_.else_.return.co": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_Source.compile_Source.compile.try_.else_.return.co", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 144, "end_line": 181, "span_ids": ["Source.compile"], "tokens": 393}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Source(object):\n\n def compile(\n self, filename=None, mode=\"exec\", flag=0, dont_inherit=0, _genframe=None\n ):\n \"\"\" return compiled code object. if filename is None\n invent an artificial filename which displays\n the source/line position of the caller frame.\n \"\"\"\n if not filename or py.path.local(filename).check(file=0):\n if _genframe is None:\n _genframe = sys._getframe(1) # the caller\n fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno\n base = \"<%d-codegen \" % self._compilecounter\n self.__class__._compilecounter += 1\n if not filename:\n filename = base + \"%s:%d>\" % (fn, lineno)\n else:\n filename = base + \"%r %s:%d>\" % (filename, fn, lineno)\n source = \"\\n\".join(self.lines) + \"\\n\"\n try:\n co = compile(source, filename, mode, flag)\n except SyntaxError:\n ex = sys.exc_info()[1]\n # re-represent syntax errors from parsing python strings\n msglines = self.lines[: ex.lineno]\n if ex.offset:\n msglines.append(\" \" * ex.offset + \"^\")\n msglines.append(\"(code was compiled probably from here: %s)\" % filename)\n newex = SyntaxError(\"\\n\".join(msglines))\n newex.offset = ex.offset\n newex.lineno = ex.lineno\n newex.text = ex.text\n raise newex\n else:\n if flag & _AST_FLAG:\n return co\n lines = [(x + \"\\n\") for x in self.lines]\n linecache.cache[filename] = (1, None, lines, filename)\n return co", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py___compile_.return.co": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py___compile_.return.co", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 184, "end_line": 201, "span_ids": ["Source.compile", "compile_"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# public API shortcut functions\n#\n\n\ndef compile_(source, filename=None, mode=\"exec\", flags=0, dont_inherit=0):\n \"\"\" compile the given source to a raw code object,\n and maintain an internal cache which allows later\n retrieval of the source code for the code object\n and any recursively created code objects.\n \"\"\"\n if isinstance(source, ast.AST):\n # XXX should Source support having AST?\n return compile(source, filename, mode, flags, dont_inherit)\n _genframe = sys._getframe(1) # the caller\n s = Source(source)\n co = s.compile(filename, mode, flags, _genframe=_genframe)\n return co", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getfslineno_getfslineno.return.fspath_lineno": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getfslineno_getfslineno.return.fspath_lineno", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 204, "end_line": 231, "span_ids": ["getfslineno"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getfslineno(obj):\n \"\"\" Return source location (path, lineno) for the given object.\n If the source cannot be determined return (\"\", -1).\n\n The line number is 0-based.\n \"\"\"\n from .code import Code\n\n try:\n code = Code(obj)\n except TypeError:\n try:\n fn = inspect.getsourcefile(obj) or inspect.getfile(obj)\n except TypeError:\n return \"\", -1\n\n fspath = fn and py.path.local(fn) or None\n lineno = -1\n if fspath:\n try:\n _, lineno = findsource(obj)\n except IOError:\n pass\n else:\n fspath = code.path\n lineno = code.firstlineno\n assert isinstance(lineno, int)\n return fspath, lineno", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_None_3_deindent.return.textwrap_dedent_n_join": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_None_3_deindent.return.textwrap_dedent_n_join", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 234, "end_line": 262, "span_ids": ["deindent", "getfslineno", "findsource", "getsource"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "#\n# helper functions\n#\n\n\ndef findsource(obj):\n try:\n sourcelines, lineno = inspect.findsource(obj)\n except Exception:\n return None, -1\n source = Source()\n source.lines = [line.rstrip() for line in sourcelines]\n return source, lineno\n\n\ndef getsource(obj, **kwargs):\n from .code import getrawcode\n\n obj = getrawcode(obj)\n try:\n strsrc = inspect.getsource(obj)\n except IndentationError:\n strsrc = '\"Buggy python version consider upgrading, cannot get source\"'\n assert isinstance(strsrc, str)\n return Source(strsrc, **kwargs)\n\n\ndef deindent(lines):\n return textwrap.dedent(\"\\n\".join(lines)).splitlines()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_get_statement_startend2_get_statement_startend2.return.start_end": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_get_statement_startend2_get_statement_startend2.return.start_end", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 265, "end_line": 286, "span_ids": ["get_statement_startend2"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_statement_startend2(lineno, node):\n import ast\n\n # flatten all statements and except handlers into one lineno-list\n # AST's line numbers start indexing at 1\n values = []\n for x in ast.walk(node):\n if isinstance(x, (ast.stmt, ast.ExceptHandler)):\n values.append(x.lineno - 1)\n for name in (\"finalbody\", \"orelse\"):\n val = getattr(x, name, None)\n if val:\n # treat the finally/orelse part as its own statement\n values.append(val[0].lineno - 1 - 1)\n values.sort()\n insert_index = bisect_right(values, lineno)\n start = values[insert_index - 1]\n if insert_index >= len(values):\n end = None\n else:\n end = values[insert_index]\n return start, end", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getstatementrange_ast_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_code/source.py_getstatementrange_ast_", "embedding": null, "metadata": {"file_path": "src/_pytest/_code/source.py", "file_name": "source.py", "file_type": "text/x-python", "category": "implementation", "start_line": 289, "end_line": 329, "span_ids": ["getstatementrange_ast"], "tokens": 369}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getstatementrange_ast(lineno, source, assertion=False, astnode=None):\n if astnode is None:\n content = str(source)\n # See #4260:\n # don't produce duplicate warnings when compiling source to find ast\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n astnode = compile(content, \"source\", \"exec\", _AST_FLAG)\n\n start, end = get_statement_startend2(lineno, astnode)\n # we need to correct the end:\n # - ast-parsing strips comments\n # - there might be empty lines\n # - we might have lesser indented code blocks at the end\n if end is None:\n end = len(source.lines)\n\n if end > start + 1:\n # make sure we don't span differently indented code blocks\n # by using the BlockFinder helper used which inspect.getsource() uses itself\n block_finder = inspect.BlockFinder()\n # if we start with an indented line, put blockfinder to \"started\" mode\n block_finder.started = source.lines[start][0].isspace()\n it = ((x + \"\\n\") for x in source.lines[start:end])\n try:\n for tok in tokenize.generate_tokens(lambda: next(it)):\n block_finder.tokeneater(*tok)\n except (inspect.EndOfBlock, IndentationError):\n end = block_finder.last + start\n except 
Exception:\n pass\n\n # the end might still point to a comment or empty line, correct it\n while end:\n line = source.lines[end - 1].lstrip()\n if line.startswith(\"#\") or not line:\n end -= 1\n else:\n break\n return astnode, start, end", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_pprint__call_and_format_exception.try_.except_Exception_as_exc_.return._s_s_raised_in_rep": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_pprint__call_and_format_exception.try_.except_Exception_as_exc_.return._s_s_raised_in_rep", "embedding": null, "metadata": {"file_path": "src/_pytest/_io/saferepr.py", "file_name": "saferepr.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["imports", "_call_and_format_exception"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pprint\n\nfrom six.moves import reprlib\n\n\ndef _call_and_format_exception(call, x, *args):\n try:\n # Try the vanilla repr and make sure that the result is a string\n return call(x, *args)\n except Exception as exc:\n exc_name = type(exc).__name__\n try:\n exc_info = str(exc)\n except Exception:\n exc_info = \"unknown\"\n return '<[%s(\"%s\") raised in repr()] %s object at 0x%x>' % (\n exc_name,\n exc_info,\n x.__class__.__name__,\n id(x),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr_SafeRepr.repr_unicode.return.s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr_SafeRepr.repr_unicode.return.s", "embedding": null, "metadata": {"file_path": "src/_pytest/_io/saferepr.py", "file_name": "saferepr.py", "file_type": "text/x-python", "category": "implementation", "start_line": 24, "end_line": 48, "span_ids": ["SafeRepr", "SafeRepr.repr_unicode", "SafeRepr.repr"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SafeRepr(reprlib.Repr):\n \"\"\"subclass of repr.Repr that limits the resulting size of repr()\n and includes information on exceptions raised during the call.\n \"\"\"\n\n def repr(self, x):\n return self._callhelper(reprlib.Repr.repr, self, x)\n\n def repr_unicode(self, x, level):\n # Strictly speaking wrong on narrow builds\n def repr(u):\n if \"'\" not in u:\n return u\"'%s'\" % u\n elif '\"' not in u:\n return u'\"%s\"' % u\n else:\n return u\"'%s'\" % u.replace(\"'\", r\"\\'\")\n\n s = repr(x[: self.maxstring])\n if len(s) > self.maxstring:\n i = max(0, (self.maxstring - 3) // 2)\n j = max(0, self.maxstring - 3 - i)\n s = repr(x[:i] + x[len(x) - j :])\n s = s[:i] + \"...\" 
+ s[len(s) - j :]\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr.repr_instance_safeformat.return._call_and_format_exceptio": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_SafeRepr.repr_instance_safeformat.return._call_and_format_exceptio", "embedding": null, "metadata": {"file_path": "src/_pytest/_io/saferepr.py", "file_name": "saferepr.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 67, "span_ids": ["SafeRepr._callhelper", "SafeRepr.repr_instance", "safeformat"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SafeRepr(reprlib.Repr):\n\n def repr_instance(self, x, level):\n return self._callhelper(repr, x)\n\n def _callhelper(self, call, x, *args):\n s = _call_and_format_exception(call, x, *args)\n if len(s) > self.maxsize:\n i = max(0, (self.maxsize - 3) // 2)\n j = max(0, self.maxsize - 3 - i)\n s = s[:i] + \"...\" + s[len(s) - j :]\n return s\n\n\ndef safeformat(obj):\n \"\"\"return a pretty printed string for the given object.\n Failing __repr__ functions of user instances will be represented\n with a short exception info.\n \"\"\"\n return _call_and_format_exception(pprint.pformat, obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_saferepr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/_io/saferepr.py_saferepr_", "embedding": null, "metadata": {"file_path": "src/_pytest/_io/saferepr.py", "file_name": "saferepr.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 83, "span_ids": ["saferepr"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def saferepr(obj, maxsize=240):\n \"\"\"return a size-limited safe repr-string for the given object.\n Failing __repr__ functions of user instances will be represented\n with a short exception info and 'saferepr' generally takes\n care to never raise exceptions itself. 
This function is a wrapper\n around the Repr/reprlib functionality of the standard 2.6 lib.\n \"\"\"\n # review exception handling\n srepr = SafeRepr()\n srepr.maxstring = maxsize\n srepr.maxsize = maxsize\n srepr.maxother = 160\n return srepr.repr(obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py___pytest_addoption.group_addoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py___pytest_addoption.group_addoption_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 31, "span_ids": ["imports", "pytest_addoption", "docstring"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nsupport for presenting detailed information in failing assertions.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport six\n\nfrom _pytest.assertion import rewrite\nfrom _pytest.assertion import truncate\nfrom _pytest.assertion import util\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"debugconfig\")\n group.addoption(\n \"--assert\",\n action=\"store\",\n dest=\"assertmode\",\n choices=(\"rewrite\", \"plain\"),\n default=\"rewrite\",\n metavar=\"MODE\",\n help=\"\"\"Control assertion debugging tools. 'plain'\n performs no assertion debugging. 
'rewrite'\n (the default) rewrites assert statements in\n test modules on import to provide assert\n expression information.\"\"\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_register_assert_rewrite_register_assert_rewrite.importhook_mark_rewrite_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_register_assert_rewrite_register_assert_rewrite.importhook_mark_rewrite_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 55, "span_ids": ["register_assert_rewrite"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def register_assert_rewrite(*names):\n \"\"\"Register one or more module names to be rewritten on import.\n\n This function will make sure that this module or all modules inside\n the package will get their assert statements rewritten.\n Thus you should make sure to call this before the module is\n actually imported, usually in your __init__.py if you are a plugin\n using a package.\n\n :raise TypeError: if the given module names are not strings.\n \"\"\"\n for name in names:\n if not isinstance(name, str):\n msg = \"expected module names as *args, got {0} instead\"\n raise TypeError(msg.format(repr(names)))\n for hook in sys.meta_path:\n if isinstance(hook, rewrite.AssertionRewritingHook):\n importhook = hook\n break\n else:\n importhook = DummyRewriteHook()\n importhook.mark_rewrite(*names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_DummyRewriteHook_pytest_collection.if_assertstate_.if_assertstate_hook_is_no.assertstate_hook_set_sess": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_DummyRewriteHook_pytest_collection.if_assertstate_.if_assertstate_hook_is_no.assertstate_hook_set_sess", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 101, "span_ids": ["DummyRewriteHook.mark_rewrite", "pytest_collection", "install_importhook", "DummyRewriteHook", "AssertionState"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DummyRewriteHook(object):\n \"\"\"A no-op import hook for when rewriting is disabled.\"\"\"\n\n def mark_rewrite(self, *names):\n pass\n\n\nclass AssertionState(object):\n \"\"\"State for the assertion plugin.\"\"\"\n\n def __init__(self, config, mode):\n self.mode = 
mode\n self.trace = config.trace.root.get(\"assertion\")\n self.hook = None\n\n\ndef install_importhook(config):\n \"\"\"Try to install the rewrite hook, raise SystemError if it fails.\"\"\"\n # Jython has an AST bug that make the assertion rewriting hook malfunction.\n if sys.platform.startswith(\"java\"):\n raise SystemError(\"rewrite not supported\")\n\n config._assertstate = AssertionState(config, \"rewrite\")\n config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)\n sys.meta_path.insert(0, hook)\n config._assertstate.trace(\"installed rewrite import hook\")\n\n def undo():\n hook = config._assertstate.hook\n if hook is not None and hook in sys.meta_path:\n sys.meta_path.remove(hook)\n\n config.add_cleanup(undo)\n return hook\n\n\ndef pytest_collection(session):\n # this hook is only called when test modules are collected\n # so for example not in the master process of pytest-xdist\n # (which does not collect test modules)\n assertstate = getattr(session.config, \"_assertstate\", None)\n if assertstate:\n if assertstate.hook is not None:\n assertstate.hook.set_session(session)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_pytest_runtest_setup_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/__init__.py_pytest_runtest_setup_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 104, "end_line": 156, "span_ids": ["pytest_runtest_teardown", "pytest_runtest_setup", "pytest_sessionfinish", "impl"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_runtest_setup(item):\n \"\"\"Setup the pytest_assertrepr_compare hook\n\n The newinterpret and rewrite modules will use util._reprcompare if\n it exists to use custom reporting via the\n pytest_assertrepr_compare hook. This sets up this custom\n comparison for the test.\n \"\"\"\n\n def callbinrepr(op, left, right):\n \"\"\"Call the pytest_assertrepr_compare hook and prepare the result\n\n This uses the first result from the hook and then ensures the\n following:\n * Overly verbose explanations are truncated unless configured otherwise\n (eg. 
if running in verbose mode).\n * Embedded newlines are escaped to help util.format_explanation()\n later.\n * If the rewrite mode is used embedded %-characters are replaced\n to protect later % formatting.\n\n The result can be formatted by util.format_explanation() for\n pretty printing.\n \"\"\"\n hook_result = item.ihook.pytest_assertrepr_compare(\n config=item.config, op=op, left=left, right=right\n )\n for new_expl in hook_result:\n if new_expl:\n new_expl = truncate.truncate_if_required(new_expl, item)\n new_expl = [line.replace(\"\\n\", \"\\\\n\") for line in new_expl]\n res = six.text_type(\"\\n~\").join(new_expl)\n if item.config.getvalue(\"assertmode\") == \"rewrite\":\n res = res.replace(\"%\", \"%%\")\n return res\n\n util._reprcompare = callbinrepr\n\n\ndef pytest_runtest_teardown(item):\n util._reprcompare = None\n\n\ndef pytest_sessionfinish(session):\n assertstate = getattr(session.config, \"_assertstate\", None)\n if assertstate:\n if assertstate.hook is not None:\n assertstate.hook.set_session(None)\n\n\n# Expose this plugin's implementation for the pytest_assertrepr_compare hook\npytest_assertrepr_compare = util.assertrepr_compare", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__Rewrite_assertion_AST__if_sys_version_info_3.else_.ast_Call.return.ast_Call_a_b_c_None_N": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__Rewrite_assertion_AST__if_sys_version_info_3.else_.ast_Call.return.ast_Call_a_b_c_None_N", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 55, "span_ids": ["imports", "docstring", "impl"], "tokens": 367}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Rewrite assertion AST to produce nice error messages\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport ast\nimport errno\nimport imp\nimport itertools\nimport marshal\nimport os\nimport re\nimport string\nimport struct\nimport sys\nimport types\n\nimport atomicwrites\nimport py\nimport six\n\nfrom _pytest._io.saferepr import saferepr\nfrom _pytest.assertion import util\nfrom _pytest.assertion.util import ( # noqa: F401\n format_explanation as _format_explanation,\n)\nfrom _pytest.compat import spec_from_file_location\nfrom _pytest.pathlib import fnmatch_ex\nfrom _pytest.pathlib import PurePath\n\n# pytest caches rewritten pycs in __pycache__.\nif hasattr(imp, \"get_tag\"):\n PYTEST_TAG = imp.get_tag() + \"-PYTEST\"\nelse:\n if hasattr(sys, \"pypy_version_info\"):\n impl = \"pypy\"\n elif sys.platform == \"java\":\n impl = \"jython\"\n else:\n impl = \"cpython\"\n ver = sys.version_info\n PYTEST_TAG = \"%s-%s%s-PYTEST\" % (impl, ver[0], ver[1])\n del ver, impl\n\nPYC_EXT = \".py\" + (__debug__ and \"c\" or \"o\")\nPYC_TAIL = \".\" + PYTEST_TAG + PYC_EXT\n\nASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3\n\nif sys.version_info >= (3, 5):\n ast_Call = 
ast.Call\nelse:\n\n def ast_Call(a, b, c):\n return ast.Call(a, b, c, None, None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook_AssertionRewritingHook._imp_find_module.return.imp_find_module_name_pat": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook_AssertionRewritingHook._imp_find_module.return.imp_find_module_name_pat", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 82, "span_ids": ["AssertionRewritingHook", "AssertionRewritingHook._imp_find_module", "AssertionRewritingHook.set_session"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n \"\"\"PEP302 Import hook which rewrites asserts.\"\"\"\n\n def __init__(self, config):\n self.config = config\n self.fnpats = config.getini(\"python_files\")\n self.session = None\n self.modules = {}\n self._rewritten_names = set()\n self._register_with_pkg_resources()\n self._must_rewrite = set()\n # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,\n # which might result in infinite recursion (#3506)\n self._writing_pyc = False\n self._basenames_to_check_rewrite = {\"conftest\"}\n self._marked_for_rewrite_cache = {}\n self._session_paths_checked = False\n\n def set_session(self, session):\n self.session = session\n self._session_paths_checked = False\n\n def _imp_find_module(self, name, path=None):\n \"\"\"Indirection so we can mock calls to find_module originated from the hook during testing\"\"\"\n return imp.find_module(name, path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module_AssertionRewritingHook.find_module.co._read_pyc_fn_pypath_pyc_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module_AssertionRewritingHook.find_module.co._read_pyc_fn_pypath_pyc_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 84, "end_line": 163, "span_ids": ["AssertionRewritingHook.find_module"], "tokens": 795}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def find_module(self, name, path=None):\n if self._writing_pyc:\n return None\n state = self.config._assertstate\n if 
self._early_rewrite_bailout(name, state):\n return None\n state.trace(\"find_module called for: %s\" % name)\n names = name.rsplit(\".\", 1)\n lastname = names[-1]\n pth = None\n if path is not None:\n # Starting with Python 3.3, path is a _NamespacePath(), which\n # causes problems if not converted to list.\n path = list(path)\n if len(path) == 1:\n pth = path[0]\n if pth is None:\n try:\n fd, fn, desc = self._imp_find_module(lastname, path)\n except ImportError:\n return None\n if fd is not None:\n fd.close()\n tp = desc[2]\n if tp == imp.PY_COMPILED:\n if hasattr(imp, \"source_from_cache\"):\n try:\n fn = imp.source_from_cache(fn)\n except ValueError:\n # Python 3 doesn't like orphaned but still-importable\n # .pyc files.\n fn = fn[:-1]\n else:\n fn = fn[:-1]\n elif tp != imp.PY_SOURCE:\n # Don't know what this is.\n return None\n else:\n fn = os.path.join(pth, name.rpartition(\".\")[2] + \".py\")\n\n fn_pypath = py.path.local(fn)\n if not self._should_rewrite(name, fn_pypath, state):\n return None\n\n self._rewritten_names.add(name)\n\n # The requested module looks like a test file, so rewrite it. This is\n # the most magical part of the process: load the source, rewrite the\n # asserts, and load the rewritten source. We also cache the rewritten\n # module code in a special pyc. We must be aware of the possibility of\n # concurrent pytest processes rewriting and loading pycs. To avoid\n # tricky race conditions, we maintain the following invariant: The\n # cached pyc is always a complete, valid pyc. Operations on it must be\n # atomic. POSIX's atomic rename comes in handy.\n write = not sys.dont_write_bytecode\n cache_dir = os.path.join(fn_pypath.dirname, \"__pycache__\")\n if write:\n try:\n os.mkdir(cache_dir)\n except OSError:\n e = sys.exc_info()[1].errno\n if e == errno.EEXIST:\n # Either the __pycache__ directory already exists (the\n # common case) or it's blocked by a non-dir node. In the\n # latter case, we'll ignore it in _write_pyc.\n pass\n elif e in [errno.ENOENT, errno.ENOTDIR]:\n # One of the path components was not a directory, likely\n # because we're in a zip file.\n write = False\n elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:\n state.trace(\"read only directory: %r\" % fn_pypath.dirname)\n write = False\n else:\n raise\n cache_name = fn_pypath.basename[:-3] + PYC_TAIL\n pyc = os.path.join(cache_dir, cache_name)\n # Notice that even if we're in a read-only directory, I'm going\n # to check for a cached pyc. This may not be optimal...\n co = _read_pyc(fn_pypath, pyc, state.trace)\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module.if_co_is_None__AssertionRewritingHook.find_module.return.self": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.find_module.if_co_is_None__AssertionRewritingHook.find_module.return.self", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 164, "end_line": 179, "span_ids": ["AssertionRewritingHook.find_module"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def find_module(self, name, path=None):\n # ... other code\n if co is None:\n state.trace(\"rewriting %r\" % (fn,))\n source_stat, co = _rewrite_test(self.config, fn_pypath)\n if co is None:\n # Probably a SyntaxError in the test.\n return None\n if write:\n self._writing_pyc = True\n try:\n _write_pyc(state, co, source_stat, pyc)\n finally:\n self._writing_pyc = False\n else:\n state.trace(\"found cached rewritten pyc for %r\" % (fn,))\n self.modules[name] = co, pyc\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._early_rewrite_bailout_AssertionRewritingHook._early_rewrite_bailout.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._early_rewrite_bailout_AssertionRewritingHook._early_rewrite_bailout.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 217, "span_ids": ["AssertionRewritingHook._early_rewrite_bailout"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def _early_rewrite_bailout(self, name, state):\n \"\"\"\n This is a fast way to get out of rewriting modules. 
Profiling has\n shown that the call to imp.find_module (inside of the find_module\n from this class) is a major slowdown, so, this method tries to\n filter what we're sure won't be rewritten before getting to it.\n \"\"\"\n if self.session is not None and not self._session_paths_checked:\n self._session_paths_checked = True\n for path in self.session._initialpaths:\n # Make something as c:/projects/my_project/path.py ->\n # ['c:', 'projects', 'my_project', 'path.py']\n parts = str(path).split(os.path.sep)\n # add 'path' to basenames to be checked.\n self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0])\n\n # Note: conftest already by default in _basenames_to_check_rewrite.\n parts = name.split(\".\")\n if parts[-1] in self._basenames_to_check_rewrite:\n return False\n\n # For matching the name it must be as if it was a filename.\n path = PurePath(os.path.sep.join(parts) + \".py\")\n\n for pat in self.fnpats:\n # if the pattern contains subdirectories (\"tests/**.py\" for example) we can't bail out based\n # on the name alone because we need to match against the full path\n if os.path.dirname(pat):\n return False\n if fnmatch_ex(pat, path):\n return False\n\n if self._is_marked_for_rewrite(name, state):\n return False\n\n state.trace(\"early skip of rewriting module: %s\" % (name,))\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._should_rewrite_AssertionRewritingHook._should_rewrite.return.self__is_marked_for_rewri": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._should_rewrite_AssertionRewritingHook._should_rewrite.return.self__is_marked_for_rewri", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 219, "end_line": 238, "span_ids": ["AssertionRewritingHook._should_rewrite"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def _should_rewrite(self, name, fn_pypath, state):\n # always rewrite conftest files\n fn = str(fn_pypath)\n if fn_pypath.basename == \"conftest.py\":\n state.trace(\"rewriting conftest file: %r\" % (fn,))\n return True\n\n if self.session is not None:\n if self.session.isinitpath(fn):\n state.trace(\"matched test file (was specified on cmdline): %r\" % (fn,))\n return True\n\n # modules not passed explicitly on the command line are only\n # rewritten if they match the naming convention for test files\n for pat in self.fnpats:\n if fn_pypath.fnmatch(pat):\n state.trace(\"matched test file %r\" % (fn,))\n return True\n\n return self._is_marked_for_rewrite(name, state)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._is_marked_for_rewrite_AssertionRewritingHook._warn_already_imported._issue_warning_captured_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook._is_marked_for_rewrite_AssertionRewritingHook._warn_already_imported._issue_warning_captured_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 240, "end_line": 280, "span_ids": ["AssertionRewritingHook.mark_rewrite", "AssertionRewritingHook._is_marked_for_rewrite", "AssertionRewritingHook._warn_already_imported"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def _is_marked_for_rewrite(self, name, state):\n try:\n return self._marked_for_rewrite_cache[name]\n except KeyError:\n for marked in self._must_rewrite:\n if name == marked or name.startswith(marked + \".\"):\n state.trace(\"matched marked file %r (from %r)\" % (name, marked))\n self._marked_for_rewrite_cache[name] = True\n return True\n\n self._marked_for_rewrite_cache[name] = False\n return False\n\n def mark_rewrite(self, *names):\n \"\"\"Mark import names as needing to be rewritten.\n\n The named module or package as well as any nested modules will\n be rewritten on import.\n \"\"\"\n already_imported = (\n set(names).intersection(sys.modules).difference(self._rewritten_names)\n )\n for name in already_imported:\n if not AssertionRewriter.is_rewrite_disabled(\n sys.modules[name].__doc__ or \"\"\n ):\n self._warn_already_imported(name)\n self._must_rewrite.update(names)\n self._marked_for_rewrite_cache.clear()\n\n def _warn_already_imported(self, name):\n from _pytest.warning_types import PytestAssertRewriteWarning\n from _pytest.warnings import _issue_warning_captured\n\n _issue_warning_captured(\n PytestAssertRewriteWarning(\n \"Module already imported so cannot be rewritten: %s\" % name\n ),\n self.config.hook,\n stacklevel=5,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.load_module_AssertionRewritingHook.load_module.return.sys_modules_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.load_module_AssertionRewritingHook.load_module.return.sys_modules_name_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 282, "end_line": 306, "span_ids": ["AssertionRewritingHook.load_module"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "class AssertionRewritingHook(object):\n\n def load_module(self, name):\n co, pyc = self.modules.pop(name)\n if name in sys.modules:\n # If there is an existing module object named 'fullname' in\n # sys.modules, the loader must use that existing module. (Otherwise,\n # the reload() builtin will not work correctly.)\n mod = sys.modules[name]\n else:\n # I wish I could just call imp.load_compiled here, but __file__ has to\n # be set properly. In Python 3.2+, this all would be handled correctly\n # by load_compiled.\n mod = sys.modules[name] = imp.new_module(name)\n try:\n mod.__file__ = co.co_filename\n # Normally, this attribute is 3.2+.\n mod.__cached__ = pyc\n mod.__loader__ = self\n # Normally, this attribute is 3.4+\n mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self)\n six.exec_(co, mod.__dict__)\n except: # noqa\n if name in sys.modules:\n del sys.modules[name]\n raise\n return sys.modules[name]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.is_package_AssertionRewritingHook.get_data.with_open_pathname_rb_.return.f_read_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewritingHook.is_package_AssertionRewritingHook.get_data.with_open_pathname_rb_.return.f_read_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 308, "end_line": 340, "span_ids": ["AssertionRewritingHook._register_with_pkg_resources", "AssertionRewritingHook.is_package", "AssertionRewritingHook.get_data"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewritingHook(object):\n\n def is_package(self, name):\n try:\n fd, fn, desc = self._imp_find_module(name)\n except ImportError:\n return False\n if fd is not None:\n fd.close()\n tp = desc[2]\n return tp == imp.PKG_DIRECTORY\n\n @classmethod\n def _register_with_pkg_resources(cls):\n \"\"\"\n Ensure package resources can be loaded from this loader. 
May be called\n multiple times, as the operation is idempotent.\n \"\"\"\n try:\n import pkg_resources\n\n # access an attribute in case a deferred importer is present\n pkg_resources.__name__\n except ImportError:\n return\n\n # Since pytest tests are always located in the file system, the\n # DefaultProvider is appropriate.\n pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)\n\n def get_data(self, pathname):\n \"\"\"Optional PEP302 get_data API.\n \"\"\"\n with open(pathname, \"rb\") as f:\n return f.read()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__write_pyc_BOM_UTF8._xef_xbb_xbf_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py__write_pyc_BOM_UTF8._xef_xbb_xbf_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 371, "span_ids": ["_write_pyc", "impl:29"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _write_pyc(state, co, source_stat, pyc):\n # Technically, we don't have to have the same pyc format as\n # (C)Python, since these \"pycs\" should never be seen by builtin\n # import. However, there's little reason deviate, and I hope\n # sometime to be able to use imp.load_compiled to load them. 
(See\n # the comment in load_module above.)\n try:\n with atomicwrites.atomic_write(pyc, mode=\"wb\", overwrite=True) as fp:\n fp.write(imp.get_magic())\n # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)\n mtime = int(source_stat.mtime) & 0xFFFFFFFF\n size = source_stat.size & 0xFFFFFFFF\n # \">\",\n ast.Add: \"+\",\n ast.Sub: \"-\",\n ast.Mult: \"*\",\n ast.Div: \"/\",\n ast.FloorDiv: \"//\",\n ast.Mod: \"%%\", # escaped for string formatting\n ast.Eq: \"==\",\n ast.NotEq: \"!=\",\n ast.Lt: \"<\",\n ast.LtE: \"<=\",\n ast.Gt: \">\",\n ast.GtE: \">=\",\n ast.Pow: \"**\",\n ast.Is: \"is\",\n ast.IsNot: \"is not\",\n ast.In: \"in\",\n ast.NotIn: \"not in\",\n}\n# Python 3.5+ compatibility\ntry:\n binop_map[ast.MatMult] = \"@\"\nexcept AttributeError:\n pass\n\n# Python 3.4+ compatibility\nif hasattr(ast, \"NameConstant\"):\n _NameConstant = ast.NameConstant\nelse:\n\n def _NameConstant(c):\n return ast.Name(str(c), ast.Load())\n\n\ndef set_location(node, lineno, col_offset):\n \"\"\"Set node location information recursively.\"\"\"\n\n def _fix(node, lineno, col_offset):\n if \"lineno\" in node._attributes:\n node.lineno = lineno\n if \"col_offset\" in node._attributes:\n node.col_offset = col_offset\n for child in ast.iter_child_nodes(node):\n _fix(child, lineno, col_offset)\n\n _fix(node, lineno, col_offset)\n return node", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter_AssertionRewriter.__init__.self.config.config": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter_AssertionRewriter.__init__.self.config.config", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 608, "end_line": 663, "span_ids": ["AssertionRewriter"], "tokens": 542}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n \"\"\"Assertion rewriting implementation.\n\n The main entrypoint is to call .run() with an ast.Module instance,\n this will then find all the assert statements and rewrite them to\n provide intermediate values and a detailed assertion error. See\n http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html\n for an overview of how this works.\n\n The entry point here is .run() which will iterate over all the\n statements in an ast.Module and for each ast.Assert statement it\n finds call .visit() with it. Then .visit_Assert() takes over and\n is responsible for creating new ast statements to replace the\n original assert statement: it rewrites the test of an assertion\n to provide intermediate values and replace it with an if statement\n which raises an assertion error with a detailed explanation in\n case the expression is false.\n\n For this .visit_Assert() uses the visitor pattern to visit all the\n AST nodes of the ast.Assert.test field, each visit call returning\n an AST node and the corresponding explanation string. 
During this\n state is kept in several instance attributes:\n\n :statements: All the AST statements which will replace the assert\n statement.\n\n :variables: This is populated by .variable() with each variable\n used by the statements so that they can all be set to None at\n the end of the statements.\n\n :variable_counter: Counter to create new unique variables needed\n by statements. Variables are created using .variable() and\n have the form of \"@py_assert0\".\n\n :on_failure: The AST statements which will be executed if the\n assertion test fails. This is the code which will construct\n the failure message and raises the AssertionError.\n\n :explanation_specifiers: A dict filled by .explanation_param()\n with %-formatting placeholders and their corresponding\n expressions to use in the building of an assertion message.\n This is used by .pop_format_context() to build a message.\n\n :stack: A stack of the explanation_specifiers dicts maintained by\n .push_format_context() and .pop_format_context() which allows\n to build another %-formatted string while already building one.\n\n This state is reset on every new assert statement visited and used\n by the other visitors.\n\n \"\"\"\n\n def __init__(self, module_path, config):\n super(AssertionRewriter, self).__init__()\n self.module_path = module_path\n self.config = config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.run_AssertionRewriter.run.while_nodes_.for_name_field_in_ast_it.if_isinstance_field_list.elif_.nodes_append_field_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.run_AssertionRewriter.run.while_nodes_.for_name_field_in_ast_it.if_isinstance_field_list.elif_.nodes_append_field_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 665, "end_line": 728, "span_ids": ["AssertionRewriter.run"], "tokens": 455}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def run(self, mod):\n \"\"\"Find all assert statements in *mod* and rewrite them.\"\"\"\n if not mod.body:\n # Nothing to do.\n return\n # Insert some special imports at the top of the module but after any\n # docstrings and __future__ imports.\n aliases = [\n ast.alias(six.moves.builtins.__name__, \"@py_builtins\"),\n ast.alias(\"_pytest.assertion.rewrite\", \"@pytest_ar\"),\n ]\n doc = getattr(mod, \"docstring\", None)\n expect_docstring = doc is None\n if doc is not None and self.is_rewrite_disabled(doc):\n return\n pos = 0\n lineno = 1\n for item in mod.body:\n if (\n expect_docstring\n and isinstance(item, ast.Expr)\n and isinstance(item.value, ast.Str)\n ):\n doc = item.value.s\n if self.is_rewrite_disabled(doc):\n return\n expect_docstring = False\n elif (\n not isinstance(item, ast.ImportFrom)\n or item.level > 0\n or item.module != \"__future__\"\n ):\n lineno = item.lineno\n break\n pos += 1\n else:\n lineno = item.lineno\n 
imports = [\n ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases\n ]\n mod.body[pos:pos] = imports\n # Collect asserts.\n nodes = [mod]\n while nodes:\n node = nodes.pop()\n for name, field in ast.iter_fields(node):\n if isinstance(field, list):\n new = []\n for i, child in enumerate(field):\n if isinstance(child, ast.Assert):\n # Transform assert.\n new.extend(self.visit(child))\n else:\n new.append(child)\n if isinstance(child, ast.AST):\n nodes.append(child)\n setattr(node, name, new)\n elif (\n isinstance(field, ast.AST)\n # Don't recurse into expressions as they can't contain\n # asserts.\n and not isinstance(field, ast.expr)\n ):\n nodes.append(field)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.is_rewrite_disabled_AssertionRewriter.builtin.return.ast_Attribute_builtin_nam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.is_rewrite_disabled_AssertionRewriter.builtin.return.ast_Attribute_builtin_nam", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 730, "end_line": 760, "span_ids": ["AssertionRewriter.builtin", "AssertionRewriter.assign", "AssertionRewriter.helper", "AssertionRewriter.display", "AssertionRewriter.variable", "AssertionRewriter.is_rewrite_disabled"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n @staticmethod\n def is_rewrite_disabled(docstring):\n return \"PYTEST_DONT_REWRITE\" in docstring\n\n def variable(self):\n \"\"\"Get a new variable.\"\"\"\n # Use a character invalid in python identifiers to avoid clashing.\n name = \"@py_assert\" + str(next(self.variable_counter))\n self.variables.append(name)\n return name\n\n def assign(self, expr):\n \"\"\"Give *expr* a name.\"\"\"\n name = self.variable()\n self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))\n return ast.Name(name, ast.Load())\n\n def display(self, expr):\n \"\"\"Call saferepr on the expression.\"\"\"\n return self.helper(\"_saferepr\", expr)\n\n def helper(self, name, *args):\n \"\"\"Call a helper in this module.\"\"\"\n py_name = ast.Name(\"@pytest_ar\", ast.Load())\n attr = ast.Attribute(py_name, name, ast.Load())\n return ast_Call(attr, list(args), [])\n\n def builtin(self, name):\n \"\"\"Return the builtin called *name*.\"\"\"\n builtin_name = ast.Name(\"@py_builtins\", ast.Load())\n return ast.Attribute(builtin_name, name, ast.Load())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.explanation_param_AssertionRewriter.explanation_param.return._specifier_s_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.explanation_param_AssertionRewriter.explanation_param.return._specifier_s_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 762, "end_line": 773, "span_ids": ["AssertionRewriter.explanation_param"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def explanation_param(self, expr):\n \"\"\"Return a new named %-formatting placeholder for expr.\n\n This creates a %-formatting placeholder for expr in the\n current formatting context, e.g. ``%(py0)s``. The placeholder\n and expr are placed in the current format context so that it\n can be used on the next call to .pop_format_context().\n\n \"\"\"\n specifier = \"py\" + str(next(self.variable_counter))\n self.explanation_specifiers[specifier] = expr\n return \"%(\" + specifier + \")s\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.push_format_context_AssertionRewriter.push_format_context.self_stack_append_self_ex": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.push_format_context_AssertionRewriter.push_format_context.self_stack_append_self_ex", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 775, "end_line": 787, "span_ids": ["AssertionRewriter.push_format_context"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def push_format_context(self):\n \"\"\"Create a new formatting context.\n\n The format context is used for when an explanation wants to\n have a variable value formatted in the assertion message. In\n this case the value required can be added using\n .explanation_param(). 
Finally .pop_format_context() is used\n to format a string of %-formatted values as added by\n .explanation_param().\n\n \"\"\"\n self.explanation_specifiers = {}\n self.stack.append(self.explanation_specifiers)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.pop_format_context_AssertionRewriter.generic_visit.return.res_self_explanation_par": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.pop_format_context_AssertionRewriter.generic_visit.return.res_self_explanation_par", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 789, "end_line": 812, "span_ids": ["AssertionRewriter.pop_format_context", "AssertionRewriter.generic_visit"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def pop_format_context(self, expl_expr):\n \"\"\"Format the %-formatted string with current format context.\n\n The expl_expr should be an ast.Str instance constructed from\n the %-placeholders created by .explanation_param(). This will\n add the required code to format said string to .on_failure and\n return the ast.Name instance of the formatted string.\n\n \"\"\"\n current = self.stack.pop()\n if self.stack:\n self.explanation_specifiers = self.stack[-1]\n keys = [ast.Str(key) for key in current.keys()]\n format_dict = ast.Dict(keys, list(current.values()))\n form = ast.BinOp(expl_expr, ast.Mod(), format_dict)\n name = \"@py_format\" + str(next(self.variable_counter))\n self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))\n return ast.Name(name, ast.Load())\n\n def generic_visit(self, node):\n \"\"\"Handle expressions we don't have custom code for.\"\"\"\n assert isinstance(node, ast.expr)\n res = self.assign(node)\n return res, self.explanation_param(self.display(res))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Assert_AssertionRewriter.visit_Assert.return.self_statements": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Assert_AssertionRewriter.visit_Assert.return.self_statements", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 814, "end_line": 879, "span_ids": ["AssertionRewriter.visit_Assert"], "tokens": 575}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_Assert(self, assert_):\n \"\"\"Return the AST statements to replace the ast.Assert instance.\n\n This rewrites the test of an assertion to provide\n intermediate values and replace it with an if statement which\n raises an assertion error with a detailed explanation in case\n the expression is false.\n\n \"\"\"\n if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:\n from _pytest.warning_types import PytestAssertRewriteWarning\n import warnings\n\n warnings.warn_explicit(\n PytestAssertRewriteWarning(\n \"assertion is always true, perhaps remove parentheses?\"\n ),\n category=None,\n filename=str(self.module_path),\n lineno=assert_.lineno,\n )\n\n self.statements = []\n self.variables = []\n self.variable_counter = itertools.count()\n self.stack = []\n self.on_failure = []\n self.push_format_context()\n # Rewrite assert into a bunch of statements.\n top_condition, explanation = self.visit(assert_.test)\n # If in a test module, check if directly asserting None, in order to warn [Issue #3191]\n if self.module_path is not None:\n self.statements.append(\n self.warn_about_none_ast(\n top_condition, module_path=self.module_path, lineno=assert_.lineno\n )\n )\n # Create failure message.\n body = self.on_failure\n negation = ast.UnaryOp(ast.Not(), top_condition)\n self.statements.append(ast.If(negation, body, []))\n if assert_.msg:\n assertmsg = self.helper(\"_format_assertmsg\", assert_.msg)\n explanation = \"\\n>assert \" + explanation\n else:\n assertmsg = ast.Str(\"\")\n explanation = \"assert \" + explanation\n template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))\n msg = self.pop_format_context(template)\n fmt = self.helper(\"_format_explanation\", msg)\n err_name = ast.Name(\"AssertionError\", ast.Load())\n exc = ast_Call(err_name, [fmt], [])\n if sys.version_info[0] >= 3:\n raise_ = ast.Raise(exc, None)\n else:\n raise_ = ast.Raise(exc, None, None)\n body.append(raise_)\n # Clear temporary variables by setting them to None.\n if self.variables:\n variables = [ast.Name(name, ast.Store()) for name in self.variables]\n clear = ast.Assign(variables, _NameConstant(None))\n self.statements.append(clear)\n # Fix line numbers.\n for stmt in self.statements:\n set_location(stmt, assert_.lineno, assert_.col_offset)\n return self.statements", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.warn_about_none_ast_AssertionRewriter.warn_about_none_ast.return.ast_If_val_is_none_send_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.warn_about_none_ast_AssertionRewriter.warn_about_none_ast.return.ast_If_val_is_none_send_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 881, "end_line": 906, "span_ids": ["AssertionRewriter.warn_about_none_ast"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def warn_about_none_ast(self, node, module_path, lineno):\n \"\"\"\n Returns an AST issuing a warning if the value of node is `None`.\n This is used to warn the user when asserting a function that asserts\n internally already.\n See issue #3191 for more details.\n \"\"\"\n\n # Using parse because it is different between py2 and py3.\n AST_NONE = ast.parse(\"None\").body[0].value\n val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])\n send_warning = ast.parse(\n \"\"\"\nfrom _pytest.warning_types import PytestAssertRewriteWarning\nfrom warnings import warn_explicit\nwarn_explicit(\n PytestAssertRewriteWarning('asserting the value None, please use \"assert is None\"'),\n category=None,\n filename={filename!r},\n lineno={lineno},\n)\n \"\"\".format(\n filename=module_path.strpath, lineno=lineno\n )\n ).body\n return ast.If(val_is_none, send_warning, [])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Name_AssertionRewriter.visit_Name.return.name_self_explanation_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Name_AssertionRewriter.visit_Name.return.name_self_explanation_pa", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 908, "end_line": 916, "span_ids": ["AssertionRewriter.visit_Name"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_Name(self, name):\n # Display the repr of the name if it's a local variable or\n # _should_repr_global_name() thinks it's acceptable.\n locs = ast_Call(self.builtin(\"locals\"), [], [])\n inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])\n dorepr = self.helper(\"_should_repr_global_name\", name)\n test = ast.BoolOp(ast.Or(), [inlocs, dorepr])\n expr = ast.IfExp(test, self.display(name), ast.Str(name.id))\n return name, self.explanation_param(expr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_BoolOp_AssertionRewriter.visit_BoolOp.return.ast_Name_res_var_ast_Loa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_BoolOp_AssertionRewriter.visit_BoolOp.return.ast_Name_res_var_ast_Loa", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 918, "end_line": 951, "span_ids": ["AssertionRewriter.visit_BoolOp"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_BoolOp(self, boolop):\n res_var = self.variable()\n expl_list = self.assign(ast.List([], ast.Load()))\n app = ast.Attribute(expl_list, \"append\", ast.Load())\n is_or = int(isinstance(boolop.op, ast.Or))\n body = save = self.statements\n fail_save = self.on_failure\n levels = len(boolop.values) - 1\n self.push_format_context()\n # Process each operand, short-circuting if needed.\n for i, v in enumerate(boolop.values):\n if i:\n fail_inner = []\n # cond is set in a prior loop iteration below\n self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa\n self.on_failure = fail_inner\n self.push_format_context()\n res, expl = self.visit(v)\n body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))\n expl_format = self.pop_format_context(ast.Str(expl))\n call = ast_Call(app, [expl_format], [])\n self.on_failure.append(ast.Expr(call))\n if i < levels:\n cond = res\n if is_or:\n cond = ast.UnaryOp(ast.Not(), cond)\n inner = []\n self.statements.append(ast.If(cond, inner, []))\n self.statements = body = inner\n self.statements = save\n self.on_failure = fail_save\n expl_template = self.helper(\"_format_boolop\", expl_list, ast.Num(is_or))\n expl = self.pop_format_context(expl_template)\n return ast.Name(res_var, ast.Load()), self.explanation_param(expl)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_UnaryOp_AssertionRewriter.visit_BinOp.return.res_explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_UnaryOp_AssertionRewriter.visit_BinOp.return.res_explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 953, "end_line": 965, "span_ids": ["AssertionRewriter.visit_BinOp", "AssertionRewriter.visit_UnaryOp"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_UnaryOp(self, unary):\n pattern = unary_map[unary.op.__class__]\n operand_res, operand_expl = self.visit(unary.operand)\n res = self.assign(ast.UnaryOp(unary.op, operand_res))\n return res, pattern % (operand_expl,)\n\n def visit_BinOp(self, binop):\n symbol = binop_map[binop.op.__class__]\n left_expr, left_expl = self.visit(binop.left)\n right_expr, right_expl = self.visit(binop.right)\n explanation = \"(%s %s %s)\" % (left_expl, symbol, right_expl)\n res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))\n return res, explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_35_AssertionRewriter.visit_Starred.return.new_starred_expl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_35_AssertionRewriter.visit_Starred.return.new_starred_expl", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 967, "end_line": 998, "span_ids": ["AssertionRewriter.visit_Call_35", "AssertionRewriter.visit_Starred"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_Call_35(self, call):\n \"\"\"\n visit `ast.Call` nodes on Python3.5 and after\n \"\"\"\n new_func, func_expl = self.visit(call.func)\n arg_expls = []\n new_args = []\n new_kwargs = []\n for arg in call.args:\n res, expl = self.visit(arg)\n arg_expls.append(expl)\n new_args.append(res)\n for keyword in call.keywords:\n res, expl = self.visit(keyword.value)\n new_kwargs.append(ast.keyword(keyword.arg, res))\n if keyword.arg:\n arg_expls.append(keyword.arg + \"=\" + expl)\n else: # **args have `arg` keywords with an .arg of None\n arg_expls.append(\"**\" + expl)\n\n expl = \"%s(%s)\" % (func_expl, \", \".join(arg_expls))\n new_call = ast.Call(new_func, new_args, new_kwargs)\n res = self.assign(new_call)\n res_expl = self.explanation_param(self.display(res))\n outer_expl = \"%s\\n{%s = %s\\n}\" % (res_expl, res_expl, expl)\n return res, outer_expl\n\n def visit_Starred(self, starred):\n # From Python 3.5, a Starred node can appear in a function call\n res, expl = self.visit(starred.value)\n new_starred = ast.Starred(res, starred.ctx)\n return new_starred, \"*\" + expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_legacy_AssertionRewriter.visit_Call_legacy.return.res_outer_expl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Call_legacy_AssertionRewriter.visit_Call_legacy.return.res_outer_expl", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1000, "end_line": 1028, "span_ids": ["AssertionRewriter.visit_Call_legacy"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_Call_legacy(self, call):\n \"\"\"\n visit `ast.Call nodes on 3.4 and below`\n \"\"\"\n new_func, func_expl = self.visit(call.func)\n arg_expls = []\n new_args = []\n new_kwargs = []\n new_star = new_kwarg = None\n for arg in 
call.args:\n res, expl = self.visit(arg)\n new_args.append(res)\n arg_expls.append(expl)\n for keyword in call.keywords:\n res, expl = self.visit(keyword.value)\n new_kwargs.append(ast.keyword(keyword.arg, res))\n arg_expls.append(keyword.arg + \"=\" + expl)\n if call.starargs:\n new_star, expl = self.visit(call.starargs)\n arg_expls.append(\"*\" + expl)\n if call.kwargs:\n new_kwarg, expl = self.visit(call.kwargs)\n arg_expls.append(\"**\" + expl)\n expl = \"%s(%s)\" % (func_expl, \", \".join(arg_expls))\n new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)\n res = self.assign(new_call)\n res_expl = self.explanation_param(self.display(res))\n outer_expl = \"%s\\n{%s = %s\\n}\" % (res_expl, res_expl, expl)\n return res, outer_expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter._ast_Call_signature_chan_AssertionRewriter.visit_Attribute.return.res_expl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter._ast_Call_signature_chan_AssertionRewriter.visit_Attribute.return.res_expl", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1030, "end_line": 1046, "span_ids": ["AssertionRewriter:3", "AssertionRewriter.visit_Call_legacy", "AssertionRewriter.visit_Attribute"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n # ast.Call signature changed on 3.5,\n # conditionally change which methods is named\n # visit_Call depending on Python version\n if sys.version_info >= (3, 5):\n visit_Call = visit_Call_35\n else:\n visit_Call = visit_Call_legacy\n\n def visit_Attribute(self, attr):\n if not isinstance(attr.ctx, ast.Load):\n return self.generic_visit(attr)\n value, value_expl = self.visit(attr.value)\n res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))\n res_expl = self.explanation_param(self.display(res))\n pat = \"%s\\n{%s = %s.%s\\n}\"\n expl = pat % (res_expl, res_expl, value_expl, attr.attr)\n return res, expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Compare_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/rewrite.py_AssertionRewriter.visit_Compare_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/rewrite.py", "file_name": "rewrite.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1048, "end_line": 1085, "span_ids": ["AssertionRewriter.visit_Compare"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class AssertionRewriter(ast.NodeVisitor):\n\n def visit_Compare(self, comp):\n self.push_format_context()\n left_res, left_expl = self.visit(comp.left)\n if isinstance(comp.left, (ast.Compare, ast.BoolOp)):\n left_expl = \"({})\".format(left_expl)\n res_variables = [self.variable() for i in range(len(comp.ops))]\n load_names = [ast.Name(v, ast.Load()) for v in res_variables]\n store_names = [ast.Name(v, ast.Store()) for v in res_variables]\n it = zip(range(len(comp.ops)), comp.ops, comp.comparators)\n expls = []\n syms = []\n results = [left_res]\n for i, op, next_operand in it:\n next_res, next_expl = self.visit(next_operand)\n if isinstance(next_operand, (ast.Compare, ast.BoolOp)):\n next_expl = \"({})\".format(next_expl)\n results.append(next_res)\n sym = binop_map[op.__class__]\n syms.append(ast.Str(sym))\n expl = \"%s %s %s\" % (left_expl, sym, next_expl)\n expls.append(ast.Str(expl))\n res_expr = ast.Compare(left_res, [op], [next_res])\n self.statements.append(ast.Assign([store_names[i]], res_expr))\n left_res, left_expl = next_res, next_expl\n # Use pytest.assertion.util._reprcompare if that's available.\n expl_call = self.helper(\n \"_call_reprcompare\",\n ast.Tuple(syms, ast.Load()),\n ast.Tuple(load_names, ast.Load()),\n ast.Tuple(expls, ast.Load()),\n ast.Tuple(results, ast.Load()),\n )\n if len(comp.ops) > 1:\n res = ast.BoolOp(ast.And(), load_names)\n else:\n res = load_names[0]\n return res, self.explanation_param(self.pop_format_context(expl_call))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py____running_on_ci.return.any_var_in_os_environ_for": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py____running_on_ci.return.any_var_in_os_environ_for", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/truncate.py", "file_name": "truncate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["_running_on_ci", "impl", "docstring", "truncate_if_required", "_should_truncate_item", "imports"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nUtilities for truncating assertion output.\n\nCurrent default behaviour is to truncate assertion explanations at\n~8 terminal lines, unless running in \"-vv\" mode or running on CI.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport six\n\nDEFAULT_MAX_LINES = 8\nDEFAULT_MAX_CHARS = 8 * 80\nUSAGE_MSG = \"use '-vv' to show\"\n\n\ndef truncate_if_required(explanation, item, max_length=None):\n \"\"\"\n Truncate this assertion explanation if the given test item is eligible.\n \"\"\"\n if _should_truncate_item(item):\n return _truncate_explanation(explanation)\n return explanation\n\n\ndef _should_truncate_item(item):\n \"\"\"\n Whether or not this test item is eligible for truncation.\n \"\"\"\n verbose = item.config.option.verbose\n 
return verbose < 2 and not _running_on_ci()\n\n\ndef _running_on_ci():\n \"\"\"Check if we're currently running on a CI system.\"\"\"\n env_vars = [\"CI\", \"BUILD_NUMBER\"]\n return any(var in os.environ for var in env_vars)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_explanation__truncate_explanation.return.truncated_explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_explanation__truncate_explanation.return.truncated_explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/truncate.py", "file_name": "truncate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 79, "span_ids": ["_truncate_explanation"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _truncate_explanation(input_lines, max_lines=None, max_chars=None):\n \"\"\"\n Truncate given list of strings that makes up the assertion explanation.\n\n Truncates to either 8 lines, or 640 characters - whichever the input reaches\n first. The remaining lines will be replaced by a usage message.\n \"\"\"\n\n if max_lines is None:\n max_lines = DEFAULT_MAX_LINES\n if max_chars is None:\n max_chars = DEFAULT_MAX_CHARS\n\n # Check if truncation required\n input_char_count = len(\"\".join(input_lines))\n if len(input_lines) <= max_lines and input_char_count <= max_chars:\n return input_lines\n\n # Truncate first to max_lines, and then truncate to max_chars if max_chars\n # is exceeded.\n truncated_explanation = input_lines[:max_lines]\n truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)\n\n # Add ellipsis to final line\n truncated_explanation[-1] = truncated_explanation[-1] + \"...\"\n\n # Append useful message to explanation\n truncated_line_count = len(input_lines) - len(truncated_explanation)\n truncated_line_count += 1 # Account for the part-truncated final line\n msg = \"...Full output truncated\"\n if truncated_line_count == 1:\n msg += \" ({} line hidden)\".format(truncated_line_count)\n else:\n msg += \" ({} lines hidden)\".format(truncated_line_count)\n msg += \", {}\".format(USAGE_MSG)\n truncated_explanation.extend([six.text_type(\"\"), six.text_type(msg)])\n return truncated_explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_by_char_count_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/truncate.py__truncate_by_char_count_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/truncate.py", "file_name": "truncate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 102, "span_ids": ["_truncate_by_char_count"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _truncate_by_char_count(input_lines, max_chars):\n # Check if truncation required\n if len(\"\".join(input_lines)) <= max_chars:\n return input_lines\n\n # Find point at which input length exceeds total allowed length\n iterated_char_count = 0\n for iterated_index, input_line in enumerate(input_lines):\n if iterated_char_count + len(input_line) > max_chars:\n break\n iterated_char_count += len(input_line)\n\n # Create truncated explanation with modified final line\n truncated_result = input_lines[:iterated_index]\n final_line = input_lines[iterated_index]\n if final_line:\n final_line_truncate_point = max_chars - iterated_char_count\n final_line = final_line[:final_line_truncate_point]\n truncated_result.append(final_line)\n return truncated_result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Utilities_for_assertio_ecu.if_isinstance_s_bytes_.else_.return.s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Utilities_for_assertio_ecu.if_isinstance_s_bytes_.else_.return.s", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["ecu", "imports", "docstring", "impl"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Utilities for assertion debugging\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pprint\n\nimport six\n\nimport _pytest._code\nfrom ..compat import Sequence\nfrom _pytest import outcomes\nfrom _pytest._io.saferepr import saferepr\n\n# The _reprcompare attribute on the util module is used by the new assertion\n# interpretation code and assertion rewriter to detect this plugin was\n# loaded and in turn call the hooks defined here as part of the\n# DebugInterpreter.\n_reprcompare = None\n\n\n# the re-encoding is needed for python2 repr\n# with non-ascii characters (see issue 877 and 1379)\ndef ecu(s):\n if isinstance(s, bytes):\n return s.decode(\"UTF-8\", \"replace\")\n else:\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_format_explanation_format_explanation.return.u_n_join_result_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_format_explanation_format_explanation.return.u_n_join_result_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 31, "end_line": 44, "span_ids": ["format_explanation"], 
"tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def format_explanation(explanation):\n \"\"\"This formats an explanation\n\n Normally all embedded newlines are escaped, however there are\n three exceptions: \\n{, \\n} and \\n~. The first two are intended\n cover nested explanations, see function and attribute explanations\n for examples (.visit_Call(), visit_Attribute()). The last one is\n for when one explanation needs to span multiple lines, e.g. when\n displaying diffs.\n \"\"\"\n explanation = ecu(explanation)\n lines = _split_explanation(explanation)\n result = _format_lines(lines)\n return u\"\\n\".join(result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__split_explanation__split_explanation.return.lines": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__split_explanation__split_explanation.return.lines", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 47, "end_line": 61, "span_ids": ["_split_explanation"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _split_explanation(explanation):\n \"\"\"Return a list of individual lines in the explanation\n\n This will return a list of lines split on '\\n{', '\\n}' and '\\n~'.\n Any other newlines will be escaped and appear in the line as the\n literal '\\n' characters.\n \"\"\"\n raw_lines = (explanation or u\"\").split(\"\\n\")\n lines = [raw_lines[0]]\n for values in raw_lines[1:]:\n if values and values[0] in [\"{\", \"}\", \"~\", \">\"]:\n lines.append(values)\n else:\n lines[-1] += \"\\\\n\" + values\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__format_lines__format_lines.return.result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__format_lines__format_lines.return.result", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 64, "end_line": 96, "span_ids": ["_format_lines"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _format_lines(lines):\n \"\"\"Format the individual lines\n\n 
This will replace the '{', '}' and '~' characters of our mini\n formatting language with the proper 'where ...', 'and ...' and ' +\n ...' text, taking care of indentation along the way.\n\n Return a list of formatted lines.\n \"\"\"\n result = lines[:1]\n stack = [0]\n stackcnt = [0]\n for line in lines[1:]:\n if line.startswith(\"{\"):\n if stackcnt[-1]:\n s = u\"and \"\n else:\n s = u\"where \"\n stack.append(len(result))\n stackcnt[-1] += 1\n stackcnt.append(0)\n result.append(u\" +\" + u\" \" * (len(stack) - 1) + s + line[1:])\n elif line.startswith(\"}\"):\n stack.pop()\n stackcnt.pop()\n result[stack[-1]] += line[1:]\n else:\n assert line[0] in [\"~\", \">\"]\n stack[-1] += 1\n indent = len(stack) if line.startswith(\"~\") else len(stack) - 1\n result.append(u\" \" * indent + line[1:])\n assert len(stack) == 1\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Provide_basestring_in_p_isiterable.try_.except_TypeError_.return.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__Provide_basestring_in_p_isiterable.try_.except_TypeError_.return.False", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 99, "end_line": 135, "span_ids": ["isset", "isdict", "isdatacls", "_format_lines", "istext", "isattrs", "isiterable", "impl:3", "issequence"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# Provide basestring in python3\ntry:\n basestring = basestring\nexcept NameError:\n basestring = str\n\n\ndef issequence(x):\n return isinstance(x, Sequence) and not isinstance(x, basestring)\n\n\ndef istext(x):\n return isinstance(x, basestring)\n\n\ndef isdict(x):\n return isinstance(x, dict)\n\n\ndef isset(x):\n return isinstance(x, (set, frozenset))\n\n\ndef isdatacls(obj):\n return getattr(obj, \"__dataclass_fields__\", None) is not None\n\n\ndef isattrs(obj):\n return getattr(obj, \"__attrs_attrs__\", None) is not None\n\n\ndef isiterable(obj):\n try:\n iter(obj)\n return not istext(obj)\n except TypeError:\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_assertrepr_compare_assertrepr_compare.return._summary_explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py_assertrepr_compare_assertrepr_compare.return._summary_explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 138, "end_line": 185, "span_ids": ["assertrepr_compare"], "tokens": 452}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def assertrepr_compare(config, op, left, right):\n \"\"\"Return specialised explanations for some operators/operands\"\"\"\n width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op\n left_repr = saferepr(left, maxsize=int(width // 2))\n right_repr = saferepr(right, maxsize=width - len(left_repr))\n\n summary = u\"%s %s %s\" % (ecu(left_repr), op, ecu(right_repr))\n\n verbose = config.getoption(\"verbose\")\n explanation = None\n try:\n if op == \"==\":\n if istext(left) and istext(right):\n explanation = _diff_text(left, right, verbose)\n else:\n if issequence(left) and issequence(right):\n explanation = _compare_eq_sequence(left, right, verbose)\n elif isset(left) and isset(right):\n explanation = _compare_eq_set(left, right, verbose)\n elif isdict(left) and isdict(right):\n explanation = _compare_eq_dict(left, right, verbose)\n elif type(left) == type(right) and (isdatacls(left) or isattrs(left)):\n type_fn = (isdatacls, isattrs)\n explanation = _compare_eq_cls(left, right, verbose, type_fn)\n elif verbose > 0:\n explanation = _compare_eq_verbose(left, right)\n if isiterable(left) and isiterable(right):\n expl = _compare_eq_iterable(left, right, verbose)\n if explanation is not None:\n explanation.extend(expl)\n else:\n explanation = expl\n elif op == \"not in\":\n if istext(left) and istext(right):\n explanation = _notin_text(left, right, verbose)\n except outcomes.Exit:\n raise\n except Exception:\n explanation = [\n u\"(pytest_assertion plugin: representation of details failed. \"\n u\"Probably an object has a faulty __repr__.)\",\n six.text_type(_pytest._code.ExceptionInfo.from_current()),\n ]\n\n if not explanation:\n return None\n\n return [summary] + explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text__diff_text.escape_for_readable_diff.return.r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text__diff_text.escape_for_readable_diff.return.r", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 188, "end_line": 209, "span_ids": ["_diff_text"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _diff_text(left, right, verbose=0):\n \"\"\"Return the explanation for the diff between text or bytes.\n\n Unless --verbose is used this will skip leading and trailing\n characters which are identical to keep the diff minimal.\n\n If the input are bytes they will be safely converted to text.\n \"\"\"\n from difflib import ndiff\n\n explanation = []\n\n def escape_for_readable_diff(binary_text):\n \"\"\"\n Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode.\n This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape\n newlines and 
carriage returns (#429).\n \"\"\"\n r = six.text_type(repr(binary_text)[1:-1])\n r = r.replace(r\"\\n\", \"\\n\")\n r = r.replace(r\"\\r\", \"\\r\")\n return r\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text.if_isinstance_left_bytes__diff_text.return.explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__diff_text.if_isinstance_left_bytes__diff_text.return.explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 211, "end_line": 248, "span_ids": ["_diff_text"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _diff_text(left, right, verbose=0):\n # ... other code\n\n if isinstance(left, bytes):\n left = escape_for_readable_diff(left)\n if isinstance(right, bytes):\n right = escape_for_readable_diff(right)\n if verbose < 1:\n i = 0 # just in case left or right has zero length\n for i in range(min(len(left), len(right))):\n if left[i] != right[i]:\n break\n if i > 42:\n i -= 10 # Provide some context\n explanation = [\n u\"Skipping %s identical leading characters in diff, use -v to show\" % i\n ]\n left = left[i:]\n right = right[i:]\n if len(left) == len(right):\n for i in range(len(left)):\n if left[-i] != right[-i]:\n break\n if i > 42:\n i -= 10 # Provide some context\n explanation += [\n u\"Skipping {} identical trailing \"\n u\"characters in diff, use -v to show\".format(i)\n ]\n left = left[:-i]\n right = right[:-i]\n keepends = True\n if left.isspace() or right.isspace():\n left = repr(str(left))\n right = repr(str(right))\n explanation += [u\"Strings contain only whitespace, escaping them using repr()\"]\n explanation += [\n line.strip(\"\\n\")\n for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))\n ]\n return explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_verbose__compare_eq_iterable.return.explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_verbose__compare_eq_iterable.return.explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 283, "span_ids": ["_compare_eq_iterable", "_compare_eq_verbose"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compare_eq_verbose(left, right):\n keepends = True\n left_lines = 
repr(left).splitlines(keepends)\n right_lines = repr(right).splitlines(keepends)\n\n explanation = []\n explanation += [u\"-\" + line for line in left_lines]\n explanation += [u\"+\" + line for line in right_lines]\n\n return explanation\n\n\ndef _compare_eq_iterable(left, right, verbose=0):\n if not verbose:\n return [u\"Use -v to get the full diff\"]\n # dynamic import to speedup pytest\n import difflib\n\n try:\n left_formatting = pprint.pformat(left).splitlines()\n right_formatting = pprint.pformat(right).splitlines()\n explanation = [u\"Full diff:\"]\n except Exception:\n # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling\n # sorted() on a list would raise. See issue #718.\n # As a workaround, the full diff is generated by using the repr() string of each item of each container.\n left_formatting = sorted(repr(x) for x in left)\n right_formatting = sorted(repr(x) for x in right)\n explanation = [u\"Full diff (fallback to calling repr on each item):\"]\n explanation.extend(\n line.strip() for line in difflib.ndiff(left_formatting, right_formatting)\n )\n return explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_sequence__compare_eq_set.return.explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_sequence__compare_eq_set.return.explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 286, "end_line": 327, "span_ids": ["_compare_eq_sequence", "_compare_eq_set"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compare_eq_sequence(left, right, verbose=0):\n explanation = []\n len_left = len(left)\n len_right = len(right)\n for i in range(min(len_left, len_right)):\n if left[i] != right[i]:\n explanation += [u\"At index %s diff: %r != %r\" % (i, left[i], right[i])]\n break\n len_diff = len_left - len_right\n\n if len_diff:\n if len_diff > 0:\n dir_with_more = \"Left\"\n extra = saferepr(left[len_right])\n else:\n len_diff = 0 - len_diff\n dir_with_more = \"Right\"\n extra = saferepr(right[len_left])\n\n if len_diff == 1:\n explanation += [u\"%s contains one more item: %s\" % (dir_with_more, extra)]\n else:\n explanation += [\n u\"%s contains %d more items, first extra item: %s\"\n % (dir_with_more, len_diff, extra)\n ]\n return explanation\n\n\ndef _compare_eq_set(left, right, verbose=0):\n explanation = []\n diff_left = left - right\n diff_right = right - left\n if diff_left:\n explanation.append(u\"Extra items in the left set:\")\n for item in diff_left:\n explanation.append(saferepr(item))\n if diff_right:\n explanation.append(u\"Extra items in the right set:\")\n for item in diff_right:\n explanation.append(saferepr(item))\n return explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": 
"1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_dict__compare_eq_dict.return.explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_dict__compare_eq_dict.return.explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 330, "end_line": 366, "span_ids": ["_compare_eq_dict"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compare_eq_dict(left, right, verbose=0):\n explanation = []\n set_left = set(left)\n set_right = set(right)\n common = set_left.intersection(set_right)\n same = {k: left[k] for k in common if left[k] == right[k]}\n if same and verbose < 2:\n explanation += [u\"Omitting %s identical items, use -vv to show\" % len(same)]\n elif same:\n explanation += [u\"Common items:\"]\n explanation += pprint.pformat(same).splitlines()\n diff = {k for k in common if left[k] != right[k]}\n if diff:\n explanation += [u\"Differing items:\"]\n for k in diff:\n explanation += [saferepr({k: left[k]}) + \" != \" + saferepr({k: right[k]})]\n extra_left = set_left - set_right\n len_extra_left = len(extra_left)\n if len_extra_left:\n explanation.append(\n u\"Left contains %d more item%s:\"\n % (len_extra_left, \"\" if len_extra_left == 1 else \"s\")\n )\n explanation.extend(\n pprint.pformat({k: left[k] for k in extra_left}).splitlines()\n )\n extra_right = set_right - set_left\n len_extra_right = len(extra_right)\n if len_extra_right:\n explanation.append(\n u\"Right contains %d more item%s:\"\n % (len_extra_right, \"\" if len_extra_right == 1 else \"s\")\n )\n explanation.extend(\n pprint.pformat({k: right[k] for k in extra_right}).splitlines()\n )\n return explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_cls__compare_eq_cls.return.explanation": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__compare_eq_cls__compare_eq_cls.return.explanation", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 369, "end_line": 398, "span_ids": ["_compare_eq_cls"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _compare_eq_cls(left, right, verbose, type_fns):\n isdatacls, isattrs = type_fns\n if isdatacls(left):\n all_fields = left.__dataclass_fields__\n fields_to_check = [field for field, info in all_fields.items() if info.compare]\n elif isattrs(left):\n all_fields = left.__attrs_attrs__\n fields_to_check = [field.name for field in all_fields if field.cmp]\n\n same = []\n diff = []\n for field 
in fields_to_check:\n if getattr(left, field) == getattr(right, field):\n same.append(field)\n else:\n diff.append(field)\n\n explanation = []\n if same and verbose < 2:\n explanation.append(u\"Omitting %s identical items, use -vv to show\" % len(same))\n elif same:\n explanation += [u\"Matching attributes:\"]\n explanation += pprint.pformat(same).splitlines()\n if diff:\n explanation += [u\"Differing attributes:\"]\n for field in diff:\n explanation += [\n (u\"%s: %r != %r\") % (field, getattr(left, field), getattr(right, field))\n ]\n return explanation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__notin_text_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/assertion/util.py__notin_text_", "embedding": null, "metadata": {"file_path": "src/_pytest/assertion/util.py", "file_name": "util.py", "file_type": "text/x-python", "category": "implementation", "start_line": 401, "end_line": 418, "span_ids": ["_notin_text"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _notin_text(term, text, verbose=0):\n index = text.find(term)\n head = text[:index]\n tail = text[index + len(term) :]\n correct_text = head + tail\n diff = _diff_text(correct_text, text, verbose)\n newdiff = [u\"%s is contained here:\" % saferepr(term, maxsize=42)]\n for line in diff:\n if line.startswith(u\"Skipping\"):\n continue\n if line.startswith(u\"- \"):\n continue\n if line.startswith(u\"+ \"):\n newdiff.append(u\" \" + line[2:])\n else:\n newdiff.append(line)\n return newdiff", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py___CACHEDIR_TAG_CONTENT.b_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py___CACHEDIR_TAG_CONTENT.b_", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 41, "span_ids": ["impl", "imports:11", "docstring", "impl:2", "imports"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nmerged implementation of the cache provider\n\nthe name cache was not chosen to ensure pluggy automatically\nignores the external pytest-cache\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nfrom collections import OrderedDict\n\nimport attr\nimport py\nimport six\n\nimport pytest\nfrom .compat import _PY2 as PY2\nfrom .pathlib import Path\nfrom .pathlib import resolve_from_str\nfrom .pathlib import 
rmtree\n\nREADME_CONTENT = u\"\"\"\\\n# pytest cache directory #\n\nThis directory contains data from the pytest's cache plugin,\nwhich provides the `--lf` and `--ff` options, as well as the `cache` fixture.\n\n**Do not** commit this to version control.\n\nSee [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.\n\"\"\"\n\nCACHEDIR_TAG_CONTENT = b\"\"\"\\\nSignature: 8a477f597d28d172789f06886806bc55\n# This file is a cache directory tag created by pytest.\n# For information about cache directory tags, see:\n#\thttp://www.bford.info/cachedir/spec.html\n\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache_Cache.warn._issue_warning_captured_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache_Cache.warn._issue_warning_captured_", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 44, "end_line": 69, "span_ids": ["Cache.cache_dir_from_config", "Cache.for_config", "Cache.warn", "Cache"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass Cache(object):\n _cachedir = attr.ib(repr=False)\n _config = attr.ib(repr=False)\n\n @classmethod\n def for_config(cls, config):\n cachedir = cls.cache_dir_from_config(config)\n if config.getoption(\"cacheclear\") and cachedir.exists():\n rmtree(cachedir, force=True)\n cachedir.mkdir()\n return cls(cachedir, config)\n\n @staticmethod\n def cache_dir_from_config(config):\n return resolve_from_str(config.getini(\"cache_dir\"), config.rootdir)\n\n def warn(self, fmt, **args):\n from _pytest.warnings import _issue_warning_captured\n from _pytest.warning_types import PytestCacheWarning\n\n _issue_warning_captured(\n PytestCacheWarning(fmt.format(**args) if args else fmt),\n self._config.hook,\n stacklevel=3,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.makedir_Cache.makedir.return.py_path_local_res_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.makedir_Cache.makedir.return.py_path_local_res_", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 86, "span_ids": ["Cache.makedir"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass Cache(object):\n\n def makedir(self, name):\n \"\"\" return a directory path object with the given name. 
If the\n directory does not yet exist, it will be created. You can use it\n to manage files likes e. g. store/retrieve database\n dumps across test sessions.\n\n :param name: must be a string not containing a ``/`` separator.\n Make sure the name contains your plugin or application\n identifiers to prevent clashes with other cache users.\n \"\"\"\n name = Path(name)\n if len(name.parts) > 1:\n raise ValueError(\"name is not allowed to contain path separators\")\n res = self._cachedir.joinpath(\"d\", name)\n res.mkdir(exist_ok=True, parents=True)\n return py.path.local(res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._getvaluepath_Cache.get.try_.except_ValueError_IOErr.return.default": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._getvaluepath_Cache.get.try_.except_ValueError_IOErr.return.default", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 107, "span_ids": ["Cache.get", "Cache._getvaluepath"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass Cache(object):\n\n def _getvaluepath(self, key):\n return self._cachedir.joinpath(\"v\", Path(key))\n\n def get(self, key, default):\n \"\"\" return cached value for the given key. If no value\n was yet cached or the value cannot be read, the specified\n default is returned.\n\n :param key: must be a ``/`` separated value. Usually the first\n name is the name of your plugin or your application.\n :param default: must be provided in case of a cache-miss or\n invalid cache values.\n\n \"\"\"\n path = self._getvaluepath(key)\n try:\n with path.open(\"r\") as f:\n return json.load(f)\n except (ValueError, IOError, OSError):\n return default", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.set_Cache.set.None_1.else_.with_f_.json_dump_value_f_inden": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache.set_Cache.set.None_1.else_.with_f_.json_dump_value_f_inden", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 109, "end_line": 136, "span_ids": ["Cache.set"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass Cache(object):\n\n def set(self, key, value):\n \"\"\" save value for the given key.\n\n :param key: must be a ``/`` separated value. 
Usually the first\n name is the name of your plugin or your application.\n :param value: must be of any combination of basic\n python types, including nested types\n like e. g. lists of dictionaries.\n \"\"\"\n path = self._getvaluepath(key)\n try:\n if path.parent.is_dir():\n cache_dir_exists_already = True\n else:\n cache_dir_exists_already = self._cachedir.exists()\n path.parent.mkdir(exist_ok=True, parents=True)\n except (IOError, OSError):\n self.warn(\"could not create cache path {path}\", path=path)\n return\n if not cache_dir_exists_already:\n self._ensure_supporting_files()\n try:\n f = path.open(\"wb\" if PY2 else \"w\")\n except (IOError, OSError):\n self.warn(\"cache could not write path {path}\", path=path)\n else:\n with f:\n json.dump(value, f, indent=2, sort_keys=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._ensure_supporting_files_Cache._ensure_supporting_files.cachedir_tag_path_write_b": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_Cache._ensure_supporting_files_Cache._ensure_supporting_files.cachedir_tag_path_write_b", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 138, "end_line": 148, "span_ids": ["Cache._ensure_supporting_files"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass Cache(object):\n\n def _ensure_supporting_files(self):\n \"\"\"Create supporting files in the cache dir that are not really part of the cache.\"\"\"\n readme_path = self._cachedir / \"README.md\"\n readme_path.write_text(README_CONTENT)\n\n gitignore_path = self._cachedir.joinpath(\".gitignore\")\n msg = u\"# Created by pytest automatically.\\n*\"\n gitignore_path.write_text(msg, encoding=\"UTF-8\")\n\n cachedir_tag_path = self._cachedir.joinpath(\"CACHEDIR.TAG\")\n cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin_LFPlugin.pytest_collectreport.if_passed_.else_.self_lastfailed_report_no": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin_LFPlugin.pytest_collectreport.if_passed_.else_.self_lastfailed_report_no", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 151, "end_line": 206, "span_ids": ["LFPlugin.pytest_runtest_logreport", "LFPlugin.pytest_report_collectionfinish", "LFPlugin.pytest_ignore_collect", "LFPlugin", "LFPlugin.pytest_collectreport", "LFPlugin.last_failed_paths"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LFPlugin(object):\n \"\"\" Plugin which implements the --lf (run last-failing) option \"\"\"\n\n def __init__(self, config):\n self.config = config\n active_keys = \"lf\", \"failedfirst\"\n self.active = any(config.getoption(key) for key in active_keys)\n self.lastfailed = config.cache.get(\"cache/lastfailed\", {})\n self._previously_failed_count = None\n self._report_status = None\n self._skipped_files = 0 # count skipped files during collection due to --lf\n\n def last_failed_paths(self):\n \"\"\"Returns a set with all Paths()s of the previously failed nodeids (cached).\n \"\"\"\n result = getattr(self, \"_last_failed_paths\", None)\n if result is None:\n rootpath = Path(self.config.rootdir)\n result = {rootpath / nodeid.split(\"::\")[0] for nodeid in self.lastfailed}\n self._last_failed_paths = result\n return result\n\n def pytest_ignore_collect(self, path):\n \"\"\"\n Ignore this file path if we are in --lf mode and it is not in the list of\n previously failed files.\n \"\"\"\n if (\n self.active\n and self.config.getoption(\"lf\")\n and path.isfile()\n and self.lastfailed\n ):\n skip_it = Path(path) not in self.last_failed_paths()\n if skip_it:\n self._skipped_files += 1\n return skip_it\n\n def pytest_report_collectionfinish(self):\n if self.active and self.config.getoption(\"verbose\") >= 0:\n return \"run-last-failure: %s\" % self._report_status\n\n def pytest_runtest_logreport(self, report):\n if (report.when == \"call\" and report.passed) or report.skipped:\n self.lastfailed.pop(report.nodeid, None)\n elif report.failed:\n self.lastfailed[report.nodeid] = True\n\n def pytest_collectreport(self, report):\n passed = report.outcome in (\"passed\", \"skipped\")\n if passed:\n if report.nodeid in self.lastfailed:\n self.lastfailed.pop(report.nodeid)\n self.lastfailed.update((item.nodeid, True) for item in report.result)\n else:\n self.lastfailed[report.nodeid] = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin.pytest_collection_modifyitems_LFPlugin.pytest_sessionfinish.if_saved_lastfailed_se.config_cache_set_cache_l": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_LFPlugin.pytest_collection_modifyitems_LFPlugin.pytest_sessionfinish.if_saved_lastfailed_se.config_cache_set_cache_l", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 208, "end_line": 266, "span_ids": ["LFPlugin.pytest_sessionfinish", "LFPlugin.pytest_collection_modifyitems"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LFPlugin(object):\n\n def pytest_collection_modifyitems(self, session, config, items):\n if not self.active:\n return\n\n if self.lastfailed:\n previously_failed = []\n previously_passed = []\n for item in items:\n if item.nodeid in 
self.lastfailed:\n previously_failed.append(item)\n else:\n previously_passed.append(item)\n self._previously_failed_count = len(previously_failed)\n\n if not previously_failed:\n # Running a subset of all tests with recorded failures\n # only outside of it.\n self._report_status = \"%d known failures not in selected tests\" % (\n len(self.lastfailed),\n )\n else:\n if self.config.getoption(\"lf\"):\n items[:] = previously_failed\n config.hook.pytest_deselected(items=previously_passed)\n else: # --failedfirst\n items[:] = previously_failed + previously_passed\n\n noun = \"failure\" if self._previously_failed_count == 1 else \"failures\"\n if self._skipped_files > 0:\n files_noun = \"file\" if self._skipped_files == 1 else \"files\"\n skipped_files_msg = \" (skipped {files} {files_noun})\".format(\n files=self._skipped_files, files_noun=files_noun\n )\n else:\n skipped_files_msg = \"\"\n suffix = \" first\" if self.config.getoption(\"failedfirst\") else \"\"\n self._report_status = \"rerun previous {count} {noun}{suffix}{skipped_files}\".format(\n count=self._previously_failed_count,\n suffix=suffix,\n noun=noun,\n skipped_files=skipped_files_msg,\n )\n else:\n self._report_status = \"no previously failed tests, \"\n if self.config.getoption(\"last_failed_no_failures\") == \"none\":\n self._report_status += \"deselecting all items.\"\n config.hook.pytest_deselected(items=items)\n items[:] = []\n else:\n self._report_status += \"not deselecting items.\"\n\n def pytest_sessionfinish(self, session):\n config = self.config\n if config.getoption(\"cacheshow\") or hasattr(config, \"slaveinput\"):\n return\n\n saved_lastfailed = config.cache.get(\"cache/lastfailed\", {})\n if saved_lastfailed != self.lastfailed:\n config.cache.set(\"cache/lastfailed\", self.lastfailed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_NFPlugin_NFPlugin.pytest_sessionfinish.config_cache_set_cache_n": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_NFPlugin_NFPlugin.pytest_sessionfinish.config_cache_set_cache_n", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 269, "end_line": 300, "span_ids": ["NFPlugin.pytest_sessionfinish", "NFPlugin", "NFPlugin.pytest_collection_modifyitems", "NFPlugin._get_increasing_order"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class NFPlugin(object):\n \"\"\" Plugin which implements the --nf (run new-first) option \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.active = config.option.newfirst\n self.cached_nodeids = config.cache.get(\"cache/nodeids\", [])\n\n def pytest_collection_modifyitems(self, session, config, items):\n if self.active:\n new_items = OrderedDict()\n other_items = OrderedDict()\n for item in items:\n if item.nodeid not in self.cached_nodeids:\n new_items[item.nodeid] = item\n else:\n other_items[item.nodeid] = item\n\n items[:] = self._get_increasing_order(\n 
six.itervalues(new_items)\n ) + self._get_increasing_order(six.itervalues(other_items))\n self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]\n\n def _get_increasing_order(self, items):\n return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)\n\n def pytest_sessionfinish(self, session):\n config = self.config\n if config.getoption(\"cacheshow\") or hasattr(config, \"slaveinput\"):\n return\n\n config.cache.set(\"cache/nodeids\", self.cached_nodeids)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_addoption_pytest_addoption.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_addoption_pytest_addoption.None_6", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 303, "end_line": 358, "span_ids": ["pytest_addoption"], "tokens": 410}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--lf\",\n \"--last-failed\",\n action=\"store_true\",\n dest=\"lf\",\n help=\"rerun only the tests that failed \"\n \"at the last run (or all if none failed)\",\n )\n group.addoption(\n \"--ff\",\n \"--failed-first\",\n action=\"store_true\",\n dest=\"failedfirst\",\n help=\"run all tests but run the last failures first. \"\n \"This may re-order tests and thus lead to \"\n \"repeated fixture setup/teardown\",\n )\n group.addoption(\n \"--nf\",\n \"--new-first\",\n action=\"store_true\",\n dest=\"newfirst\",\n help=\"run tests from new files first, then the rest of the tests \"\n \"sorted by file mtime\",\n )\n group.addoption(\n \"--cache-show\",\n action=\"append\",\n nargs=\"?\",\n dest=\"cacheshow\",\n help=(\n \"show cache contents, don't perform collection or tests. 
\"\n \"Optional argument: glob (default: '*').\"\n ),\n )\n group.addoption(\n \"--cache-clear\",\n action=\"store_true\",\n dest=\"cacheclear\",\n help=\"remove all cache contents at start of test run.\",\n )\n cache_dir_default = \".pytest_cache\"\n if \"TOX_ENV_DIR\" in os.environ:\n cache_dir_default = os.path.join(os.environ[\"TOX_ENV_DIR\"], cache_dir_default)\n parser.addini(\"cache_dir\", default=cache_dir_default, help=\"cache directory path.\")\n group.addoption(\n \"--lfnf\",\n \"--last-failed-no-failures\",\n action=\"store\",\n dest=\"last_failed_no_failures\",\n choices=(\"all\", \"none\"),\n default=\"all\",\n help=\"which tests to run with no previously (known) failures.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_cmdline_main_pytest_report_header.if_config_option_verbose_.return._cachedir_format_dis": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_pytest_cmdline_main_pytest_report_header.if_config_option_verbose_.return._cachedir_format_dis", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 402, "span_ids": ["pytest_cmdline_main", "pytest_report_header", "pytest_configure", "cache"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_cmdline_main(config):\n if config.option.cacheshow:\n from _pytest.main import wrap_session\n\n return wrap_session(config, cacheshow)\n\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_configure(config):\n config.cache = Cache.for_config(config)\n config.pluginmanager.register(LFPlugin(config), \"lfplugin\")\n config.pluginmanager.register(NFPlugin(config), \"nfplugin\")\n\n\n@pytest.fixture\ndef cache(request):\n \"\"\"\n Return a cache object that can persist state between testing sessions.\n\n cache.get(key, default)\n cache.set(key, value)\n\n Keys must be a ``/`` separated value, where the first part is usually the\n name of your plugin or application to avoid clashes with other cache users.\n\n Values can be any object handled by the json stdlib module.\n \"\"\"\n return request.config.cache\n\n\ndef pytest_report_header(config):\n \"\"\"Display cachedir with --cache-show and if non-default.\"\"\"\n if config.option.verbose > 0 or config.getini(\"cache_dir\") != \".pytest_cache\":\n cachedir = config.cache._cachedir\n # TODO: evaluate generating upward relative paths\n # starting with .., ../.. 
if sensible\n\n try:\n displaypath = cachedir.relative_to(config.rootdir)\n except ValueError:\n displaypath = cachedir\n return \"cachedir: {}\".format(displaypath)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_cacheshow_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/cacheprovider.py_cacheshow_", "embedding": null, "metadata": {"file_path": "src/_pytest/cacheprovider.py", "file_name": "cacheprovider.py", "file_type": "text/x-python", "category": "implementation", "start_line": 405, "end_line": 443, "span_ids": ["cacheshow"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def cacheshow(config, session):\n from pprint import pformat\n\n tw = py.io.TerminalWriter()\n tw.line(\"cachedir: \" + str(config.cache._cachedir))\n if not config.cache._cachedir.is_dir():\n tw.line(\"cache is empty\")\n return 0\n\n glob = config.option.cacheshow[0]\n if glob is None:\n glob = \"*\"\n\n dummy = object()\n basedir = config.cache._cachedir\n vdir = basedir / \"v\"\n tw.sep(\"-\", \"cache values for %r\" % glob)\n for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):\n key = valpath.relative_to(vdir)\n val = config.cache.get(key, dummy)\n if val is dummy:\n tw.line(\"%s contains unreadable content, will be ignored\" % key)\n else:\n tw.line(\"%s contains:\" % key)\n for line in pformat(val).splitlines():\n tw.line(\" \" + line)\n\n ddir = basedir / \"d\"\n if ddir.is_dir():\n contents = sorted(ddir.rglob(glob))\n tw.sep(\"-\", \"cache directories for %r\" % glob)\n for p in contents:\n # if p.check(dir=1):\n # print(\"%s/\" % p.relto(basedir))\n if p.is_file():\n key = p.relative_to(basedir)\n tw.line(\"{} is a file of length {:d}\".format(key, p.stat().st_size))\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py___pytest_addoption.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py___pytest_addoption.None_1", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "pytest_addoption", "docstring", "impl"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nper-test stdout/stderr capturing mechanism.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport contextlib\nimport io\nimport os\nimport sys\nfrom io import UnsupportedOperation\nfrom tempfile import 
TemporaryFile\n\nimport six\n\nimport pytest\nfrom _pytest.compat import _PY3\nfrom _pytest.compat import CaptureIO\n\npatchsysdict = {0: \"stdin\", 1: \"stdout\", 2: \"stderr\"}\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group._addoption(\n \"--capture\",\n action=\"store\",\n default=\"fd\" if hasattr(os, \"dup\") else \"sys\",\n metavar=\"method\",\n choices=[\"fd\", \"sys\", \"no\"],\n help=\"per-test capturing method: one of fd|sys|no.\",\n )\n group._addoption(\n \"-s\",\n action=\"store_const\",\n const=\"no\",\n dest=\"capture\",\n help=\"shortcut for --capture=no.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_pytest_load_initial_conftests_pytest_load_initial_conftests.if_outcome_excinfo_is_not.sys_stderr_write_err_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_pytest_load_initial_conftests_pytest_load_initial_conftests.if_outcome_excinfo_is_not.sys_stderr_write_err_", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 66, "span_ids": ["pytest_load_initial_conftests"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(hookwrapper=True)\ndef pytest_load_initial_conftests(early_config, parser, args):\n ns = early_config.known_args_namespace\n if ns.capture == \"fd\":\n _py36_windowsconsoleio_workaround(sys.stdout)\n _colorama_workaround()\n _readline_workaround()\n pluginmanager = early_config.pluginmanager\n capman = CaptureManager(ns.capture)\n pluginmanager.register(capman, \"capturemanager\")\n\n # make sure that capturemanager is properly reset at final shutdown\n early_config.add_cleanup(capman.stop_global_capturing)\n\n # finally trigger conftest loading but while capturing (issue93)\n capman.start_global_capturing()\n outcome = yield\n capman.suspend_global_capture()\n if outcome.excinfo is not None:\n out, err = capman.read_global_capture()\n sys.stdout.write(out)\n sys.stderr.write(err)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager_CaptureManager.activate_fixture.if_fixture_is_not_None_.fixture__start_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager_CaptureManager.activate_fixture.if_fixture_is_not_None_.fixture__start_", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 69, "end_line": 160, "span_ids": ["CaptureManager.read_global_capture", "CaptureManager._getcapture", "CaptureManager.__repr__", "CaptureManager.start_global_capturing", "CaptureManager.activate_fixture", "CaptureManager", "CaptureManager.resume_global_capture", "CaptureManager.stop_global_capturing", 
"CaptureManager.suspend", "CaptureManager.is_globally_capturing", "CaptureManager.is_capturing", "CaptureManager.resume", "CaptureManager.suspend_global_capture"], "tokens": 806}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CaptureManager(object):\n \"\"\"\n Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each\n test phase (setup, call, teardown). After each of those points, the captured output is obtained and\n attached to the collection/runtest report.\n\n There are two levels of capture:\n * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled\n during collection and each test phase.\n * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this\n case special handling is needed to ensure the fixtures take precedence over the global capture.\n \"\"\"\n\n def __init__(self, method):\n self._method = method\n self._global_capturing = None\n self._current_item = None\n\n def __repr__(self):\n return \"\" % (\n self._method,\n self._global_capturing,\n self._current_item,\n )\n\n def _getcapture(self, method):\n if method == \"fd\":\n return MultiCapture(out=True, err=True, Capture=FDCapture)\n elif method == \"sys\":\n return MultiCapture(out=True, err=True, Capture=SysCapture)\n elif method == \"no\":\n return MultiCapture(out=False, err=False, in_=False)\n raise ValueError(\"unknown capturing method: %r\" % method) # pragma: no cover\n\n def is_capturing(self):\n if self.is_globally_capturing():\n return \"global\"\n capture_fixture = getattr(self._current_item, \"_capture_fixture\", None)\n if capture_fixture is not None:\n return (\n \"fixture %s\" % self._current_item._capture_fixture.request.fixturename\n )\n return False\n\n # Global capturing control\n\n def is_globally_capturing(self):\n return self._method != \"no\"\n\n def start_global_capturing(self):\n assert self._global_capturing is None\n self._global_capturing = self._getcapture(self._method)\n self._global_capturing.start_capturing()\n\n def stop_global_capturing(self):\n if self._global_capturing is not None:\n self._global_capturing.pop_outerr_to_orig()\n self._global_capturing.stop_capturing()\n self._global_capturing = None\n\n def resume_global_capture(self):\n # During teardown of the python process, and on rare occasions, capture\n # attributes can be `None` while trying to resume global capture.\n if self._global_capturing is not None:\n self._global_capturing.resume_capturing()\n\n def suspend_global_capture(self, in_=False):\n cap = getattr(self, \"_global_capturing\", None)\n if cap is not None:\n cap.suspend_capturing(in_=in_)\n\n def suspend(self, in_=False):\n # Need to undo local capsys-et-al if it exists before disabling global capture.\n self.suspend_fixture(self._current_item)\n self.suspend_global_capture(in_)\n\n def resume(self):\n self.resume_global_capture()\n self.resume_fixture(self._current_item)\n\n def read_global_capture(self):\n return self._global_capturing.readouterr()\n\n # Fixture Control (it's just forwarding, think about removing this later)\n\n def activate_fixture(self, item):\n \"\"\"If the current item is using ``capsys`` or ``capfd``, 
activate them so they take precedence over\n the global capture.\n \"\"\"\n fixture = getattr(item, \"_capture_fixture\", None)\n if fixture is not None:\n fixture._start()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager.deactivate_fixture_CaptureManager.pytest_internalerror.self_stop_global_capturin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureManager.deactivate_fixture_CaptureManager.pytest_internalerror.self_stop_global_capturin", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 162, "end_line": 247, "span_ids": ["CaptureManager.deactivate_fixture", "CaptureManager.pytest_runtest_teardown", "CaptureManager.suspend_fixture", "CaptureManager.item_capture", "CaptureManager.pytest_make_collect_report", "CaptureManager.pytest_keyboard_interrupt", "CaptureManager.global_and_fixture_disabled", "CaptureManager.pytest_runtest_setup", "CaptureManager.pytest_runtest_protocol", "CaptureManager.resume_fixture", "CaptureManager.pytest_runtest_call", "CaptureManager.pytest_internalerror"], "tokens": 573}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CaptureManager(object):\n\n def deactivate_fixture(self, item):\n \"\"\"Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.\"\"\"\n fixture = getattr(item, \"_capture_fixture\", None)\n if fixture is not None:\n fixture.close()\n\n def suspend_fixture(self, item):\n fixture = getattr(item, \"_capture_fixture\", None)\n if fixture is not None:\n fixture._suspend()\n\n def resume_fixture(self, item):\n fixture = getattr(item, \"_capture_fixture\", None)\n if fixture is not None:\n fixture._resume()\n\n # Helper context managers\n\n @contextlib.contextmanager\n def global_and_fixture_disabled(self):\n \"\"\"Context manager to temporarily disable global and current fixture capturing.\"\"\"\n self.suspend()\n try:\n yield\n finally:\n self.resume()\n\n @contextlib.contextmanager\n def item_capture(self, when, item):\n self.resume_global_capture()\n self.activate_fixture(item)\n try:\n yield\n finally:\n self.deactivate_fixture(item)\n self.suspend_global_capture(in_=False)\n\n out, err = self.read_global_capture()\n item.add_report_section(when, \"stdout\", out)\n item.add_report_section(when, \"stderr\", err)\n\n # Hooks\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_make_collect_report(self, collector):\n if isinstance(collector, pytest.File):\n self.resume_global_capture()\n outcome = yield\n self.suspend_global_capture()\n out, err = self.read_global_capture()\n rep = outcome.get_result()\n if out:\n rep.sections.append((\"Captured stdout\", out))\n if err:\n rep.sections.append((\"Captured stderr\", err))\n else:\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_protocol(self, item):\n self._current_item = item\n yield\n self._current_item = None\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_setup(self, item):\n with 
self.item_capture(\"setup\", item):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_call(self, item):\n with self.item_capture(\"call\", item):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_teardown(self, item):\n with self.item_capture(\"teardown\", item):\n yield\n\n @pytest.hookimpl(tryfirst=True)\n def pytest_keyboard_interrupt(self, excinfo):\n self.stop_global_capturing()\n\n @pytest.hookimpl(tryfirst=True)\n def pytest_internalerror(self, excinfo):\n self.stop_global_capturing()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capture_fixtures__ensure_only_one_capture_fixture.if_fixtures_.raise_request_raiseerror_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capture_fixtures__ensure_only_one_capture_fixture.if_fixtures_.raise_request_raiseerror_", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 250, "end_line": 260, "span_ids": ["_ensure_only_one_capture_fixture", "impl:3"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "capture_fixtures = {\"capfd\", \"capfdbinary\", \"capsys\", \"capsysbinary\"}\n\n\ndef _ensure_only_one_capture_fixture(request, name):\n fixtures = set(request.fixturenames) & capture_fixtures - {name}\n if fixtures:\n fixtures = sorted(fixtures)\n fixtures = fixtures[0] if len(fixtures) == 1 else fixtures\n raise request.raiseerror(\n \"cannot use {} and {} at the same time\".format(fixtures, name)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsys_capsys.with__install_capture_fix.yield_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsys_capsys.with__install_capture_fix.yield_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 263, "end_line": 273, "span_ids": ["capsys"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef capsys(request):\n \"\"\"Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.\n\n The captured output is made available via ``capsys.readouterr()`` method\n calls, which return a ``(out, err)`` namedtuple.\n ``out`` and ``err`` will be ``text`` objects.\n \"\"\"\n _ensure_only_one_capture_fixture(request, \"capsys\")\n with _install_capture_fixture_on_item(request, SysCapture) as fixture:\n yield fixture", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsysbinary_capsysbinary.with__install_capture_fix.yield_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capsysbinary_capsysbinary.with__install_capture_fix.yield_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 276, "end_line": 290, "span_ids": ["capsysbinary"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef capsysbinary(request):\n \"\"\"Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.\n\n The captured output is made available via ``capsysbinary.readouterr()``\n method calls, which return a ``(out, err)`` namedtuple.\n ``out`` and ``err`` will be ``bytes`` objects.\n \"\"\"\n _ensure_only_one_capture_fixture(request, \"capsysbinary\")\n # Currently, the implementation uses the python3 specific `.buffer`\n # property of CaptureIO.\n if sys.version_info < (3,):\n raise request.raiseerror(\"capsysbinary is only supported on Python 3\")\n with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:\n yield fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfd_capfd.with__install_capture_fix.yield_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfd_capfd.with__install_capture_fix.yield_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 293, "end_line": 307, "span_ids": ["capfd"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef capfd(request):\n \"\"\"Enable text capturing of writes to file descriptors ``1`` and ``2``.\n\n The captured output is made available via ``capfd.readouterr()`` method\n calls, which return a ``(out, err)`` namedtuple.\n ``out`` and ``err`` will be ``text`` objects.\n \"\"\"\n _ensure_only_one_capture_fixture(request, \"capfd\")\n if not hasattr(os, \"dup\"):\n pytest.skip(\n \"capfd fixture needs os.dup function which is not available in this system\"\n )\n with _install_capture_fixture_on_item(request, FDCapture) as fixture:\n yield fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfdbinary_capfdbinary.with__install_capture_fix.yield_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_capfdbinary_capfdbinary.with__install_capture_fix.yield_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 310, "end_line": 324, "span_ids": ["capfdbinary"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef capfdbinary(request):\n \"\"\"Enable bytes capturing of writes to file descriptors ``1`` and ``2``.\n\n The captured output is made available via ``capfd.readouterr()`` method\n calls, which return a ``(out, err)`` namedtuple.\n ``out`` and ``err`` will be ``byte`` objects.\n \"\"\"\n _ensure_only_one_capture_fixture(request, \"capfdbinary\")\n if not hasattr(os, \"dup\"):\n pytest.skip(\n \"capfdbinary fixture needs os.dup function which is not available in this system\"\n )\n with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:\n yield fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__install_capture_fixture_on_item__install_capture_fixture_on_item.del_request_node__capture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__install_capture_fixture_on_item__install_capture_fixture_on_item.del_request_node__capture", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 327, "end_line": 344, "span_ids": ["_install_capture_fixture_on_item"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextlib.contextmanager\ndef _install_capture_fixture_on_item(request, capture_class):\n \"\"\"\n Context manager which creates a ``CaptureFixture`` instance and \"installs\" it on\n the item/node of the given request. 
Used by ``capsys`` and ``capfd``.\n\n The CaptureFixture is added as attribute of the item because it needs to accessed\n by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.\n \"\"\"\n request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)\n capmanager = request.config.pluginmanager.getplugin(\"capturemanager\")\n # Need to active this fixture right away in case it is being used by another fixture (setup phase).\n # If this fixture is being used only by a test function (call phase), then we wouldn't need this\n # activation, but it doesn't hurt.\n capmanager.activate_fixture(request.node)\n yield fixture\n fixture.close()\n del request.node._capture_fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture_CaptureFixture.close.if_self__capture_is_not_N.self._capture.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture_CaptureFixture.close.if_self__capture_is_not_N.self._capture.None", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 347, "end_line": 374, "span_ids": ["CaptureFixture", "CaptureFixture._start", "CaptureFixture.close"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CaptureFixture(object):\n \"\"\"\n Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`\n fixtures.\n \"\"\"\n\n def __init__(self, captureclass, request):\n self.captureclass = captureclass\n self.request = request\n self._capture = None\n self._captured_out = self.captureclass.EMPTY_BUFFER\n self._captured_err = self.captureclass.EMPTY_BUFFER\n\n def _start(self):\n # Start if not started yet\n if getattr(self, \"_capture\", None) is None:\n self._capture = MultiCapture(\n out=True, err=True, in_=False, Capture=self.captureclass\n )\n self._capture.start_capturing()\n\n def close(self):\n if self._capture is not None:\n out, err = self._capture.pop_outerr_to_orig()\n self._captured_out += out\n self._captured_err += err\n self._capture.stop_capturing()\n self._capture = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture.readouterr_CaptureFixture.disabled.with_capmanager_global_an.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_CaptureFixture.readouterr_CaptureFixture.disabled.with_capmanager_global_an.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 376, "end_line": 403, "span_ids": ["CaptureFixture._suspend", "CaptureFixture._resume", "CaptureFixture.readouterr", "CaptureFixture.disabled"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CaptureFixture(object):\n\n def readouterr(self):\n \"\"\"Read and return the captured output so far, resetting the internal buffer.\n\n :return: captured content as a namedtuple with ``out`` and ``err`` string attributes\n \"\"\"\n captured_out, captured_err = self._captured_out, self._captured_err\n if self._capture is not None:\n out, err = self._capture.readouterr()\n captured_out += out\n captured_err += err\n self._captured_out = self.captureclass.EMPTY_BUFFER\n self._captured_err = self.captureclass.EMPTY_BUFFER\n return CaptureResult(captured_out, captured_err)\n\n def _suspend(self):\n \"\"\"Suspends this fixture's own capturing temporarily.\"\"\"\n self._capture.suspend_capturing()\n\n def _resume(self):\n \"\"\"Resumes this fixture's own capturing temporarily.\"\"\"\n self._capture.resume_capturing()\n\n @contextlib.contextmanager\n def disabled(self):\n \"\"\"Temporarily disables capture while inside the 'with' block.\"\"\"\n capmanager = self.request.config.pluginmanager.getplugin(\"capturemanager\")\n with capmanager.global_and_fixture_disabled():\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_safe_text_dupfile_safe_text_dupfile.return.EncodedFile_f_encoding_o": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_safe_text_dupfile_safe_text_dupfile.return.EncodedFile_f_encoding_o", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 406, "end_line": 422, "span_ids": ["safe_text_dupfile"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def safe_text_dupfile(f, mode, default_encoding=\"UTF8\"):\n \"\"\" return an open text file object that's a duplicate of f on the\n FD-level if possible.\n \"\"\"\n encoding = getattr(f, \"encoding\", None)\n try:\n fd = f.fileno()\n except Exception:\n if \"b\" not in getattr(f, \"mode\", \"\") and hasattr(f, \"encoding\"):\n # we seem to have a text stream, let's just use it\n return f\n else:\n newfd = os.dup(fd)\n if \"b\" not in mode:\n mode += \"b\"\n f = os.fdopen(newfd, mode, 0) # no buffering\n return EncodedFile(f, encoding or default_encoding)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_EncodedFile_CaptureResult.collections_namedtuple_C": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_EncodedFile_CaptureResult.collections_namedtuple_C", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 425, "end_line": 454, "span_ids": ["EncodedFile.__getattr__", "EncodedFile.write", "EncodedFile.writelines", "EncodedFile.name", "impl:5", "EncodedFile"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class EncodedFile(object):\n errors = \"strict\" # possibly needed by py3 code (issue555)\n\n def __init__(self, buffer, encoding):\n self.buffer = buffer\n self.encoding = encoding\n\n def write(self, obj):\n if isinstance(obj, six.text_type):\n obj = obj.encode(self.encoding, \"replace\")\n elif _PY3:\n raise TypeError(\n \"write() argument must be str, not {}\".format(type(obj).__name__)\n )\n self.buffer.write(obj)\n\n def writelines(self, linelist):\n data = \"\".join(linelist)\n self.write(data)\n\n @property\n def name(self):\n \"\"\"Ensure that file.name is a string.\"\"\"\n return repr(self.buffer)\n\n def __getattr__(self, name):\n return getattr(object.__getattribute__(self, \"buffer\"), name)\n\n\nCaptureResult = collections.namedtuple(\"CaptureResult\", [\"out\", \"err\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_MultiCapture_NoCapture.__init__.start.done.suspend.resume.lambda_args_None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_MultiCapture_NoCapture.__init__.start.done.suspend.resume.lambda_args_None", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 461, "end_line": 542, "span_ids": ["MultiCapture.pop_outerr_to_orig", "MultiCapture.resume_capturing", "MultiCapture.stop_capturing", "MultiCapture.__repr__", "MultiCapture", "NoCapture", "MultiCapture.start_capturing", "MultiCapture.suspend_capturing", "MultiCapture.readouterr"], "tokens": 544}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MultiCapture(object):\n out = err = in_ = None\n _state = None\n\n def __init__(self, out=True, err=True, in_=True, Capture=None):\n if in_:\n self.in_ = Capture(0)\n if out:\n self.out = Capture(1)\n if err:\n self.err = Capture(2)\n\n def __repr__(self):\n return \"\" % (\n self.out,\n self.err,\n self.in_,\n self._state,\n getattr(self, \"_in_suspended\", \"\"),\n )\n\n def start_capturing(self):\n self._state = \"started\"\n if self.in_:\n self.in_.start()\n if self.out:\n self.out.start()\n if self.err:\n self.err.start()\n\n def pop_outerr_to_orig(self):\n \"\"\" pop current snapshot out/err capture and flush to orig streams. 
\"\"\"\n out, err = self.readouterr()\n if out:\n self.out.writeorg(out)\n if err:\n self.err.writeorg(err)\n return out, err\n\n def suspend_capturing(self, in_=False):\n self._state = \"suspended\"\n if self.out:\n self.out.suspend()\n if self.err:\n self.err.suspend()\n if in_ and self.in_:\n self.in_.suspend()\n self._in_suspended = True\n\n def resume_capturing(self):\n self._state = \"resumed\"\n if self.out:\n self.out.resume()\n if self.err:\n self.err.resume()\n if hasattr(self, \"_in_suspended\"):\n self.in_.resume()\n del self._in_suspended\n\n def stop_capturing(self):\n \"\"\" stop capturing and reset capturing streams \"\"\"\n if self._state == \"stopped\":\n raise ValueError(\"was already stopped\")\n self._state = \"stopped\"\n if self.out:\n self.out.done()\n if self.err:\n self.err.done()\n if self.in_:\n self.in_.done()\n\n def readouterr(self):\n \"\"\" return snapshot unicode value of stdout/stderr capturings. \"\"\"\n return CaptureResult(\n self.out.snap() if self.out is not None else \"\",\n self.err.snap() if self.err is not None else \"\",\n )\n\n\nclass NoCapture(object):\n EMPTY_BUFFER = None\n __init__ = start = done = suspend = resume = lambda *args: None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCaptureBinary_FDCaptureBinary.writeorg.os_write_self_targetfd_sa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCaptureBinary_FDCaptureBinary.writeorg.os_write_self_targetfd_sa", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 545, "end_line": 626, "span_ids": ["FDCaptureBinary.suspend", "FDCaptureBinary.done", "FDCaptureBinary.writeorg", "FDCaptureBinary", "FDCaptureBinary.snap", "FDCaptureBinary.__repr__", "FDCaptureBinary.start", "FDCaptureBinary.resume"], "tokens": 604}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FDCaptureBinary(object):\n \"\"\"Capture IO to/from a given os-level filedescriptor.\n\n snap() produces `bytes`\n \"\"\"\n\n EMPTY_BUFFER = b\"\"\n _state = None\n\n def __init__(self, targetfd, tmpfile=None):\n self.targetfd = targetfd\n try:\n self.targetfd_save = os.dup(self.targetfd)\n except OSError:\n self.start = lambda: None\n self.done = lambda: None\n else:\n if targetfd == 0:\n assert not tmpfile, \"cannot set tmpfile with stdin\"\n tmpfile = open(os.devnull, \"r\")\n self.syscapture = SysCapture(targetfd)\n else:\n if tmpfile is None:\n f = TemporaryFile()\n with f:\n tmpfile = safe_text_dupfile(f, mode=\"wb+\")\n if targetfd in patchsysdict:\n self.syscapture = SysCapture(targetfd, tmpfile)\n else:\n self.syscapture = NoCapture()\n self.tmpfile = tmpfile\n self.tmpfile_fd = tmpfile.fileno()\n\n def __repr__(self):\n return \"\" % (\n self.targetfd,\n getattr(self, \"targetfd_save\", None),\n self._state,\n )\n\n def start(self):\n \"\"\" Start capturing on targetfd using memorized tmpfile. 
\"\"\"\n try:\n os.fstat(self.targetfd_save)\n except (AttributeError, OSError):\n raise ValueError(\"saved filedescriptor not valid anymore\")\n os.dup2(self.tmpfile_fd, self.targetfd)\n self.syscapture.start()\n self._state = \"started\"\n\n def snap(self):\n self.tmpfile.seek(0)\n res = self.tmpfile.read()\n self.tmpfile.seek(0)\n self.tmpfile.truncate()\n return res\n\n def done(self):\n \"\"\" stop capturing, restore streams, return original capture file,\n seeked to position zero. \"\"\"\n targetfd_save = self.__dict__.pop(\"targetfd_save\")\n os.dup2(targetfd_save, self.targetfd)\n os.close(targetfd_save)\n self.syscapture.done()\n _attempt_to_close_capture_file(self.tmpfile)\n self._state = \"done\"\n\n def suspend(self):\n self.syscapture.suspend()\n os.dup2(self.targetfd_save, self.targetfd)\n self._state = \"suspended\"\n\n def resume(self):\n self.syscapture.resume()\n os.dup2(self.tmpfile_fd, self.targetfd)\n self._state = \"resumed\"\n\n def writeorg(self, data):\n \"\"\" write to original file descriptor. \"\"\"\n if isinstance(data, six.text_type):\n data = data.encode(\"utf8\") # XXX use encoding of original stream\n os.write(self.targetfd_save, data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCapture_SysCapture.writeorg.self__old_flush_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_FDCapture_SysCapture.writeorg.self__old_flush_", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 629, "end_line": 695, "span_ids": ["SysCapture.__repr__", "SysCapture.start", "SysCapture.suspend", "SysCapture.resume", "SysCapture.writeorg", "SysCapture.snap", "SysCapture", "FDCapture.snap", "FDCapture", "SysCapture.done"], "tokens": 407}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FDCapture(FDCaptureBinary):\n \"\"\"Capture IO to/from a given os-level filedescriptor.\n\n snap() produces text\n \"\"\"\n\n EMPTY_BUFFER = str()\n\n def snap(self):\n res = super(FDCapture, self).snap()\n enc = getattr(self.tmpfile, \"encoding\", None)\n if enc and isinstance(res, bytes):\n res = six.text_type(res, enc, \"replace\")\n return res\n\n\nclass SysCapture(object):\n\n EMPTY_BUFFER = str()\n _state = None\n\n def __init__(self, fd, tmpfile=None):\n name = patchsysdict[fd]\n self._old = getattr(sys, name)\n self.name = name\n if tmpfile is None:\n if name == \"stdin\":\n tmpfile = DontReadFromInput()\n else:\n tmpfile = CaptureIO()\n self.tmpfile = tmpfile\n\n def __repr__(self):\n return \"\" % (\n self.name,\n self._old,\n self.tmpfile,\n self._state,\n )\n\n def start(self):\n setattr(sys, self.name, self.tmpfile)\n self._state = \"started\"\n\n def snap(self):\n res = self.tmpfile.getvalue()\n self.tmpfile.seek(0)\n self.tmpfile.truncate()\n return res\n\n def done(self):\n setattr(sys, self.name, self._old)\n del self._old\n _attempt_to_close_capture_file(self.tmpfile)\n self._state = \"done\"\n\n def suspend(self):\n setattr(sys, self.name, 
self._old)\n self._state = \"suspended\"\n\n def resume(self):\n setattr(sys, self.name, self.tmpfile)\n self._state = \"resumed\"\n\n def writeorg(self, data):\n self._old.write(data)\n self._old.flush()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_SysCaptureBinary_DontReadFromInput.buffer.if_sys_version_info_3.else_.raise_AttributeError_red": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py_SysCaptureBinary_DontReadFromInput.buffer.if_sys_version_info_3.else_.raise_AttributeError_red", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 698, "end_line": 742, "span_ids": ["DontReadFromInput.read", "DontReadFromInput.close", "DontReadFromInput:5", "SysCaptureBinary.snap", "DontReadFromInput.__iter__", "DontReadFromInput.fileno", "DontReadFromInput.isatty", "SysCaptureBinary", "DontReadFromInput", "DontReadFromInput.buffer"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SysCaptureBinary(SysCapture):\n EMPTY_BUFFER = b\"\"\n\n def snap(self):\n res = self.tmpfile.buffer.getvalue()\n self.tmpfile.seek(0)\n self.tmpfile.truncate()\n return res\n\n\nclass DontReadFromInput(six.Iterator):\n \"\"\"Temporary stub class. Ideally when stdin is accessed, the\n capturing should be turned off, with possibly all data captured\n so far sent to the screen. 
This should be configurable, though,\n because in automated test runs it is better to crash than\n hang indefinitely.\n \"\"\"\n\n encoding = None\n\n def read(self, *args):\n raise IOError(\"reading from stdin while output is captured\")\n\n readline = read\n readlines = read\n __next__ = read\n\n def __iter__(self):\n return self\n\n def fileno(self):\n raise UnsupportedOperation(\"redirected stdin is pseudofile, has no fileno()\")\n\n def isatty(self):\n return False\n\n def close(self):\n pass\n\n @property\n def buffer(self):\n if sys.version_info >= (3, 0):\n return self\n else:\n raise AttributeError(\"redirected stdin has no attribute buffer\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__colorama_workaround__readline_workaround.if_sys_platform_startswit.try_.except_ImportError_.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__colorama_workaround__readline_workaround.if_sys_platform_startswit.try_.except_ImportError_.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 745, "end_line": 783, "span_ids": ["_colorama_workaround", "_readline_workaround"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _colorama_workaround():\n \"\"\"\n Ensure colorama is imported so that it attaches to the correct stdio\n handles on Windows.\n\n colorama uses the terminal on import time. So if something does the\n first import of colorama while I/O capture is active, colorama will\n fail in various ways.\n \"\"\"\n if sys.platform.startswith(\"win32\"):\n try:\n import colorama # noqa: F401\n except ImportError:\n pass\n\n\ndef _readline_workaround():\n \"\"\"\n Ensure readline is imported so that it attaches to the correct stdio\n handles on Windows.\n\n Pdb uses readline support where available--when not running from the Python\n prompt, the readline module is not imported until running the pdb REPL. If\n running pytest with the --pdb option this means the readline module is not\n imported until after I/O capture has been started.\n\n This is a problem for pyreadline, which is often used to implement readline\n support on Windows, as it does not attach to the correct handles for stdout\n and/or stdin if they have been redirected by the FDCapture mechanism. 
This\n workaround ensures that readline is imported before I/O capture is setup so\n that it can attach to the actual stdin/out for the console.\n\n See https://github.com/pytest-dev/pytest/pull/1281\n \"\"\"\n if sys.platform.startswith(\"win32\"):\n try:\n import readline # noqa: F401\n except ImportError:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__py36_windowsconsoleio_workaround_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/capture.py__py36_windowsconsoleio_workaround_", "embedding": null, "metadata": {"file_path": "src/_pytest/capture.py", "file_name": "capture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 786, "end_line": 849, "span_ids": ["_attempt_to_close_capture_file", "_py36_windowsconsoleio_workaround"], "tokens": 508}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _py36_windowsconsoleio_workaround(stream):\n \"\"\"\n Python 3.6 implemented unicode console handling for Windows. This works\n by reading/writing to the raw console handle using\n ``{Read,Write}ConsoleW``.\n\n The problem is that we are going to ``dup2`` over the stdio file\n descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the\n handles used by Python to write to the console. Though there is still some\n weirdness and the console handle seems to only be closed randomly and not\n on the first call to ``CloseHandle``, or maybe it gets reopened with the\n same handle value when we suspend capturing.\n\n The workaround in this case will reopen stdio with a different fd which\n also means a different handle by replicating the logic in\n \"Py_lifecycle.c:initstdio/create_stdio\".\n\n :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given\n here as parameter for unittesting purposes.\n\n See https://github.com/pytest-dev/py/issues/103\n \"\"\"\n if not sys.platform.startswith(\"win32\") or sys.version_info[:2] < (3, 6):\n return\n\n # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)\n if not hasattr(stream, \"buffer\"):\n return\n\n buffered = hasattr(stream.buffer, \"raw\")\n raw_stdout = stream.buffer.raw if buffered else stream.buffer\n\n if not isinstance(raw_stdout, io._WindowsConsoleIO):\n return\n\n def _reopen_stdio(f, mode):\n if not buffered and mode[0] == \"w\":\n buffering = 0\n else:\n buffering = -1\n\n return io.TextIOWrapper(\n open(os.dup(f.fileno()), mode, buffering),\n f.encoding,\n f.errors,\n f.newlines,\n f.line_buffering,\n )\n\n sys.stdin = _reopen_stdio(sys.stdin, \"rb\")\n sys.stdout = _reopen_stdio(sys.stdout, \"wb\")\n sys.stderr = _reopen_stdio(sys.stderr, \"wb\")\n\n\ndef _attempt_to_close_capture_file(f):\n \"\"\"Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)\"\"\"\n if six.PY2:\n try:\n f.close()\n except IOError:\n pass\n else:\n f.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py___getlocation.return._s_d_fn_lineno_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py___getlocation.return._s_d_fn_lineno_1", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 98, "span_ids": ["_format_args", "imports:19", "impl:25", "imports:22", "impl", "imports:18", "impl:10", "docstring", "iscoroutinefunction", "imports:24", "impl:11", "imports", "docstring:2", "imports:20", "impl:23", "impl:22", "is_generator", "docstring:3", "getlocation", "impl:2", "impl:24", "imports:17", "imports:23"], "tokens": 634}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\npython version compatibility code\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport codecs\nimport functools\nimport inspect\nimport re\nimport sys\nfrom contextlib import contextmanager\n\nimport py\nimport six\nfrom six import text_type\n\nimport _pytest\nfrom _pytest._io.saferepr import saferepr\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import TEST_OUTCOME\n\ntry:\n import enum\nexcept ImportError: # pragma: no cover\n # Only available in Python 3.4+ or as a backport\n enum = None\n\n_PY3 = sys.version_info > (3, 0)\n_PY2 = not _PY3\n\n\nif _PY3:\n from inspect import signature, Parameter as Parameter\nelse:\n from funcsigs import signature, Parameter as Parameter\n\nNoneType = type(None)\nNOTSET = object()\n\nPY35 = sys.version_info[:2] >= (3, 5)\nPY36 = sys.version_info[:2] >= (3, 6)\nMODULE_NOT_FOUND_ERROR = \"ModuleNotFoundError\" if PY36 else \"ImportError\"\n\n\nif _PY3:\n from collections.abc import MutableMapping as MappingMixin\n from collections.abc import Iterable, Mapping, Sequence, Sized\nelse:\n # those raise DeprecationWarnings in Python >=3.7\n from collections import MutableMapping as MappingMixin # noqa\n from collections import Iterable, Mapping, Sequence, Sized # noqa\n\n\nif sys.version_info >= (3, 4):\n from importlib.util import spec_from_file_location\nelse:\n\n def spec_from_file_location(*_, **__):\n return None\n\n\ndef _format_args(func):\n return str(signature(func))\n\n\nisfunction = inspect.isfunction\nisclass = inspect.isclass\n# used to work around a python2 exception info leak\nexc_clear = getattr(sys, \"exc_clear\", lambda: None)\n# The type of re.compile objects is not exposed in Python.\nREGEX_TYPE = type(re.compile(\"\"))\n\n\ndef is_generator(func):\n genfunc = inspect.isgeneratorfunction(func)\n return genfunc and not iscoroutinefunction(func)\n\n\ndef iscoroutinefunction(func):\n \"\"\"Return True if func is a decorated coroutine function.\n\n Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly,\n which in turns also initializes the \"logging\" module as side-effect (see issue #8).\n \"\"\"\n return getattr(func, \"_is_coroutine\", False) or (\n hasattr(inspect, \"iscoroutinefunction\") and inspect.iscoroutinefunction(func)\n )\n\n\ndef getlocation(function, curdir):\n function = get_real_func(function)\n fn = 
py.path.local(inspect.getfile(function))\n lineno = function.__code__.co_firstlineno\n if fn.relto(curdir):\n fn = fn.relto(curdir)\n return \"%s:%d\" % (fn, lineno + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_num_mock_patch_args_num_mock_patch_args.return.len_patchings_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_num_mock_patch_args_num_mock_patch_args.return.len_patchings_", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 101, "end_line": 112, "span_ids": ["num_mock_patch_args"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def num_mock_patch_args(function):\n \"\"\" return number of arguments used up by mock arguments (if any) \"\"\"\n patchings = getattr(function, \"patchings\", None)\n if not patchings:\n return 0\n mock_modules = [sys.modules.get(\"mock\"), sys.modules.get(\"unittest.mock\")]\n if any(mock_modules):\n sentinels = [m.DEFAULT for m in mock_modules if m is not None]\n return len(\n [p for p in patchings if not p.attribute_name and p.new in sentinels]\n )\n return len(patchings)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfuncargnames_getfuncargnames.return.arg_names": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfuncargnames_getfuncargnames.return.arg_names", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 115, "end_line": 165, "span_ids": ["getfuncargnames"], "tokens": 438}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getfuncargnames(function, is_method=False, cls=None):\n \"\"\"Returns the names of a function's mandatory arguments.\n\n This should return the names of all function arguments that:\n * Aren't bound to an instance or type as in instance or class methods.\n * Don't have default values.\n * Aren't bound with functools.partial.\n * Aren't replaced with mocks.\n\n The is_method and cls arguments indicate that the function should\n be treated as a bound method even though it's not unless, only in\n the case of cls, the function is a static method.\n\n @RonnyPfannschmidt: This function should be refactored when we\n revisit fixtures. 
The fixture mechanism should ask the node for\n the fixture names, and not try to obtain directly from the\n function object well after collection has occurred.\n\n \"\"\"\n # The parameters attribute of a Signature object contains an\n # ordered mapping of parameter names to Parameter instances. This\n # creates a tuple of the names of the parameters that don't have\n # defaults.\n try:\n parameters = signature(function).parameters\n except (ValueError, TypeError) as e:\n fail(\n \"Could not determine arguments of {!r}: {}\".format(function, e),\n pytrace=False,\n )\n\n arg_names = tuple(\n p.name\n for p in parameters.values()\n if (\n p.kind is Parameter.POSITIONAL_OR_KEYWORD\n or p.kind is Parameter.KEYWORD_ONLY\n )\n and p.default is Parameter.empty\n )\n # If this function should be treated as a bound method even though\n # it's passed as an unbound method or function, remove the first\n # parameter name.\n if is_method or (\n cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod)\n ):\n arg_names = arg_names[1:]\n # Remove any names that will be replaced with mocks.\n if hasattr(function, \"__wrapped__\"):\n arg_names = arg_names[num_mock_patch_args(function) :]\n return arg_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_dummy_context_manager_None_3.else_.ascii_escaped.return._translate_non_printable_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_dummy_context_manager_None_3.else_.ascii_escaped.return._translate_non_printable_", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 168, "end_line": 263, "span_ids": ["dummy_context_manager", "impl:36", "_translate_non_printable", "impl:33", "get_default_arg_names"], "tokens": 722}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef dummy_context_manager():\n \"\"\"Context manager that does nothing, useful in situations where you might need an actual context manager or not\n depending on some condition. 
Using this allow to keep the same code\"\"\"\n yield\n\n\ndef get_default_arg_names(function):\n # Note: this code intentionally mirrors the code at the beginning of getfuncargnames,\n # to get the arguments which were excluded from its result because they had default values\n return tuple(\n p.name\n for p in signature(function).parameters.values()\n if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)\n and p.default is not Parameter.empty\n )\n\n\n_non_printable_ascii_translate_table = {\n i: u\"\\\\x{:02x}\".format(i) for i in range(128) if i not in range(32, 127)\n}\n_non_printable_ascii_translate_table.update(\n {ord(\"\\t\"): u\"\\\\t\", ord(\"\\r\"): u\"\\\\r\", ord(\"\\n\"): u\"\\\\n\"}\n)\n\n\ndef _translate_non_printable(s):\n return s.translate(_non_printable_ascii_translate_table)\n\n\nif _PY3:\n STRING_TYPES = bytes, str\n UNICODE_TYPES = six.text_type\n\n if PY35:\n\n def _bytes_to_ascii(val):\n return val.decode(\"ascii\", \"backslashreplace\")\n\n else:\n\n def _bytes_to_ascii(val):\n if val:\n # source: http://goo.gl/bGsnwC\n encoded_bytes, _ = codecs.escape_encode(val)\n return encoded_bytes.decode(\"ascii\")\n else:\n # empty bytes crashes codecs.escape_encode (#1087)\n return \"\"\n\n def ascii_escaped(val):\n \"\"\"If val is pure ascii, returns it as a str(). Otherwise, escapes\n bytes objects into a sequence of escaped bytes:\n\n b'\\xc3\\xb4\\xc5\\xd6' -> u'\\\\xc3\\\\xb4\\\\xc5\\\\xd6'\n\n and escapes unicode objects into a sequence of escaped unicode\n ids, e.g.:\n\n '4\\\\nV\\\\U00043efa\\\\x0eMXWB\\\\x1e\\\\u3028\\\\u15fd\\\\xcd\\\\U0007d944'\n\n note:\n the obvious \"v.decode('unicode-escape')\" will return\n valid utf-8 unicode if it finds them in bytes, but we\n want to return escaped bytes for any byte, even if they match\n a utf-8 string.\n\n \"\"\"\n if isinstance(val, bytes):\n ret = _bytes_to_ascii(val)\n else:\n ret = val.encode(\"unicode_escape\").decode(\"ascii\")\n return _translate_non_printable(ret)\n\n\nelse:\n STRING_TYPES = six.string_types\n UNICODE_TYPES = six.text_type\n\n def ascii_escaped(val):\n \"\"\"In py2 bytes and str are the same type, so return if it's a bytes\n object, return it unchanged if it is a full ascii string,\n otherwise escape it into its binary form.\n\n If it's a unicode string, change the unicode characters into\n unicode escapes.\n\n \"\"\"\n if isinstance(val, bytes):\n try:\n ret = val.decode(\"ascii\")\n except UnicodeDecodeError:\n ret = val.encode(\"string-escape\").decode(\"ascii\")\n else:\n ret = val.encode(\"unicode-escape\").decode(\"ascii\")\n return _translate_non_printable(ret)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py__PytestWrapper_get_real_func.return.obj": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py__PytestWrapper_get_real_func.return.obj", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 303, "span_ids": ["get_real_func", "_PytestWrapper"], "tokens": 313}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _PytestWrapper(object):\n \"\"\"Dummy wrapper around a function object for internal use only.\n\n Used to correctly unwrap the underlying function object\n when we are creating fixtures, because we wrap the function object ourselves with a decorator\n to issue warnings when the fixture function is called directly.\n \"\"\"\n\n def __init__(self, obj):\n self.obj = obj\n\n\ndef get_real_func(obj):\n \"\"\" gets the real function object of the (possibly) wrapped object by\n functools.wraps or functools.partial.\n \"\"\"\n start_obj = obj\n for i in range(100):\n # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function\n # to trigger a warning if it gets called directly instead of by pytest: we don't\n # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)\n new_obj = getattr(obj, \"__pytest_wrapped__\", None)\n if isinstance(new_obj, _PytestWrapper):\n obj = new_obj.obj\n break\n new_obj = getattr(obj, \"__wrapped__\", None)\n if new_obj is None:\n break\n obj = new_obj\n else:\n raise ValueError(\n (\"could not find real function of {start}\\nstopped at {current}\").format(\n start=saferepr(start_obj), current=saferepr(obj)\n )\n )\n if isinstance(obj, functools.partial):\n obj = obj.func\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_get_real_method_get_real_method.return.obj": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_get_real_method_get_real_method.return.obj", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 306, "end_line": 318, "span_ids": ["get_real_method"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_real_method(obj, holder):\n \"\"\"\n Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time\n returning a bound method to ``holder`` if the original object was a bound method.\n \"\"\"\n try:\n is_method = hasattr(obj, \"__func__\")\n obj = get_real_func(obj)\n except Exception:\n return obj\n if is_method and hasattr(obj, \"__get__\") and callable(obj.__get__):\n obj = obj.__get__(holder)\n return obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfslineno_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/compat.py_getfslineno_", "embedding": null, "metadata": {"file_path": "src/_pytest/compat.py", "file_name": "compat.py", "file_type": "text/x-python", "category": "implementation", "start_line": 321, "end_line": 458, "span_ids": ["impl:52", "_is_unittest_unexpected_success_a_failure", "FuncargnamesCompatAttr", "if__PY2_.CaptureIO.encoding", "impl:48", "if__PY2_.else_.CaptureIO", "safe_getattr", 
"_setup_collect_fakemodule", "getfslineno", "getimfunc", "if__PY2_.CaptureIO", "impl:56", "safe_isclass", "if__PY2_.else_.CaptureIO.__init__", "FuncargnamesCompatAttr.funcargnames"], "tokens": 804}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getfslineno(obj):\n # xxx let decorators etc specify a sane ordering\n obj = get_real_func(obj)\n if hasattr(obj, \"place_as\"):\n obj = obj.place_as\n fslineno = _pytest._code.getfslineno(obj)\n assert isinstance(fslineno[1], int), obj\n return fslineno\n\n\ndef getimfunc(func):\n try:\n return func.__func__\n except AttributeError:\n return func\n\n\ndef safe_getattr(object, name, default):\n \"\"\" Like getattr but return default upon any Exception or any OutcomeException.\n\n Attribute access can potentially fail for 'evil' Python objects.\n See issue #214.\n It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException\n instead of Exception (for more details check #2707)\n \"\"\"\n try:\n return getattr(object, name, default)\n except TEST_OUTCOME:\n return default\n\n\ndef safe_isclass(obj):\n \"\"\"Ignore any exception via isinstance on Python 3.\"\"\"\n try:\n return isclass(obj)\n except Exception:\n return False\n\n\ndef _is_unittest_unexpected_success_a_failure():\n \"\"\"Return if the test suite should fail if an @expectedFailure unittest test PASSES.\n\n From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:\n Changed in version 3.4: Returns False if there were any\n unexpectedSuccesses from tests marked with the expectedFailure() decorator.\n \"\"\"\n return sys.version_info >= (3, 4)\n\n\nif _PY3:\n\n def safe_str(v):\n \"\"\"returns v as string\"\"\"\n return str(v)\n\n\nelse:\n\n def safe_str(v):\n \"\"\"returns v as string, converting to ascii if necessary\"\"\"\n try:\n return str(v)\n except UnicodeError:\n if not isinstance(v, text_type):\n v = text_type(v)\n errors = \"replace\"\n return v.encode(\"utf-8\", errors)\n\n\nCOLLECT_FAKEMODULE_ATTRIBUTES = (\n \"Collector\",\n \"Module\",\n \"Function\",\n \"Instance\",\n \"Session\",\n \"Item\",\n \"Class\",\n \"File\",\n \"_fillfuncargs\",\n)\n\n\ndef _setup_collect_fakemodule():\n from types import ModuleType\n import pytest\n\n pytest.collect = ModuleType(\"pytest.collect\")\n pytest.collect.__all__ = [] # used for setns\n for attr in COLLECT_FAKEMODULE_ATTRIBUTES:\n setattr(pytest.collect, attr, getattr(pytest, attr))\n\n\nif _PY2:\n # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO.\n from py.io import TextIO\n\n class CaptureIO(TextIO):\n @property\n def encoding(self):\n return getattr(self, \"_encoding\", \"UTF-8\")\n\n\nelse:\n import io\n\n class CaptureIO(io.TextIOWrapper):\n def __init__(self):\n super(CaptureIO, self).__init__(\n io.BytesIO(), encoding=\"UTF-8\", newline=\"\", write_through=True\n )\n\n def getvalue(self):\n return self.buffer.getvalue().decode(\"UTF-8\")\n\n\nclass FuncargnamesCompatAttr(object):\n \"\"\" helper class so that Metafunc, Function and FixtureRequest\n don't need to each define the \"funcargnames\" compatibility attribute.\n \"\"\"\n\n @property\n def funcargnames(self):\n \"\"\" alias attribute for 
``fixturenames`` for pre-2.3 compatibility\"\"\"\n return self.fixturenames\n\n\nif six.PY2:\n\n def lru_cache(*_, **__):\n def dec(fn):\n return fn\n\n return dec\n\n\nelse:\n from functools import lru_cache # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__command_line_options__ConftestImportFailure.__init__.self.excinfo.excinfo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__command_line_options__ConftestImportFailure.__init__.self.excinfo.excinfo", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 45, "span_ids": ["ConftestImportFailure", "imports", "docstring", "impl"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" command line options, ini-file and conftest.py processing. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport copy\nimport inspect\nimport os\nimport shlex\nimport sys\nimport types\nimport warnings\n\nimport py\nimport six\nfrom pluggy import HookimplMarker\nfrom pluggy import HookspecMarker\nfrom pluggy import PluginManager\n\nimport _pytest._code\nimport _pytest.assertion\nimport _pytest.hookspec # the extension point definitions\nfrom .exceptions import PrintHelp\nfrom .exceptions import UsageError\nfrom .findpaths import determine_setup\nfrom .findpaths import exists\nfrom _pytest import deprecated\nfrom _pytest._code import ExceptionInfo\nfrom _pytest._code import filter_traceback\nfrom _pytest.compat import lru_cache\nfrom _pytest.compat import safe_str\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import Skipped\nfrom _pytest.warning_types import PytestConfigWarning\n\nhookimpl = HookimplMarker(\"pytest\")\nhookspec = HookspecMarker(\"pytest\")\n\n\nclass ConftestImportFailure(Exception):\n def __init__(self, path, excinfo):\n Exception.__init__(self, path, excinfo)\n self.path = path\n self.excinfo = excinfo", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_main_main.try_.except_UsageError_as_e_.return.EXIT_USAGEERROR": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_main_main.try_.except_UsageError_as_e_.return.EXIT_USAGEERROR", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 48, "end_line": 86, "span_ids": ["main"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def main(args=None, plugins=None):\n \"\"\" return exit code, after performing an in-process test run.\n\n :arg args: list of command line arguments.\n\n :arg plugins: list of plugin objects to be auto-registered during\n initialization.\n \"\"\"\n from _pytest.main import EXIT_USAGEERROR\n\n try:\n try:\n config = _prepareconfig(args, plugins)\n except ConftestImportFailure as e:\n exc_info = ExceptionInfo(e.excinfo)\n tw = py.io.TerminalWriter(sys.stderr)\n tw.line(\n \"ImportError while loading conftest '{e.path}'.\".format(e=e), red=True\n )\n exc_info.traceback = exc_info.traceback.filter(filter_traceback)\n exc_repr = (\n exc_info.getrepr(style=\"short\", chain=False)\n if exc_info.traceback\n else exc_info.exconly()\n )\n formatted_tb = safe_str(exc_repr)\n for line in formatted_tb.splitlines():\n tw.line(line.rstrip(), red=True)\n return 4\n else:\n try:\n return config.hook.pytest_cmdline_main(config=config)\n finally:\n config._ensure_unconfigure()\n except UsageError as e:\n tw = py.io.TerminalWriter(sys.stderr)\n for msg in e.args:\n tw.line(\"ERROR: {}\\n\".format(msg), red=True)\n return EXIT_USAGEERROR", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_cmdline_get_plugin_manager.return.get_config_pluginmanage": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_cmdline_get_plugin_manager.return.get_config_pluginmanage", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 89, "end_line": 177, "span_ids": ["get_config", "cmdline", "filename_arg", "get_plugin_manager", "directory_arg", "impl:5"], "tokens": 485}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class cmdline(object): # compatibility namespace\n main = staticmethod(main)\n\n\ndef filename_arg(path, optname):\n \"\"\" Argparse type validator for filename arguments.\n\n :path: path of filename\n :optname: name of the option\n \"\"\"\n if os.path.isdir(path):\n raise UsageError(\"{} must be a filename, given: {}\".format(optname, path))\n return path\n\n\ndef directory_arg(path, optname):\n \"\"\"Argparse type validator for directory arguments.\n\n :path: path of directory\n :optname: name of the option\n \"\"\"\n if not os.path.isdir(path):\n raise UsageError(\"{} must be a directory, given: {}\".format(optname, path))\n return path\n\n\n# Plugins that cannot be disabled via \"-p no:X\" currently.\nessential_plugins = (\n \"mark\",\n \"main\",\n \"runner\",\n \"python\",\n \"fixtures\",\n \"helpconfig\", # Provides -p.\n)\n\ndefault_plugins = essential_plugins + (\n \"terminal\",\n \"debugging\",\n \"unittest\",\n \"capture\",\n \"skipping\",\n \"tmpdir\",\n \"monkeypatch\",\n \"recwarn\",\n \"pastebin\",\n \"nose\",\n \"assertion\",\n \"junitxml\",\n \"resultlog\",\n \"doctest\",\n \"cacheprovider\",\n \"freeze_support\",\n \"setuponly\",\n \"setupplan\",\n \"stepwise\",\n 
\"warnings\",\n \"logging\",\n \"reports\",\n)\n\nbuiltin_plugins = set(default_plugins)\nbuiltin_plugins.add(\"pytester\")\n\n\ndef get_config(args=None):\n # subsequent calls to main will create a fresh instance\n pluginmanager = PytestPluginManager()\n config = Config(pluginmanager)\n\n if args is not None:\n # Handle any \"-p no:plugin\" args.\n pluginmanager.consider_preparse(args)\n\n for spec in default_plugins:\n pluginmanager.import_plugin(spec)\n return config\n\n\ndef get_plugin_manager():\n \"\"\"\n Obtain a new instance of the\n :py:class:`_pytest.config.PytestPluginManager`, with default plugins\n already loaded.\n\n This function can be used by integration with other tools, like hooking\n into pytest to run tests into an IDE.\n \"\"\"\n return get_config().pluginmanager", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__prepareconfig__prepareconfig.try_.except_BaseException_.raise": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__prepareconfig__prepareconfig.try_.except_BaseException_.raise", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 180, "end_line": 208, "span_ids": ["_prepareconfig"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _prepareconfig(args=None, plugins=None):\n warning = None\n if args is None:\n args = sys.argv[1:]\n elif isinstance(args, py.path.local):\n args = [str(args)]\n elif not isinstance(args, (tuple, list)):\n msg = \"`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})\"\n raise TypeError(msg.format(args, type(args)))\n\n config = get_config(args)\n pluginmanager = config.pluginmanager\n try:\n if plugins:\n for plugin in plugins:\n if isinstance(plugin, six.string_types):\n pluginmanager.consider_pluginarg(plugin)\n else:\n pluginmanager.register(plugin)\n if warning:\n from _pytest.warnings import _issue_warning_captured\n\n _issue_warning_captured(warning, hook=config.hook, stacklevel=4)\n return pluginmanager.hook.pytest_cmdline_parse(\n pluginmanager=pluginmanager, args=args\n )\n except BaseException:\n config._ensure_unconfigure()\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager_PytestPluginManager.addhooks.return.self_add_hookspecs_module": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager_PytestPluginManager.addhooks.return.self_add_hookspecs_module", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 211, "end_line": 257, "span_ids": ["PytestPluginManager", "PytestPluginManager.addhooks"], "tokens": 386}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n \"\"\"\n Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific\n functionality:\n\n * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and\n ``pytest_plugins`` global variables found in plugins being loaded;\n * ``conftest.py`` loading during start-up;\n \"\"\"\n\n def __init__(self):\n super(PytestPluginManager, self).__init__(\"pytest\")\n self._conftest_plugins = set()\n\n # state related to local conftest plugins\n self._dirpath2confmods = {}\n self._conftestpath2mod = {}\n self._confcutdir = None\n self._noconftest = False\n self._duplicatepaths = set()\n\n self.add_hookspecs(_pytest.hookspec)\n self.register(self)\n if os.environ.get(\"PYTEST_DEBUG\"):\n err = sys.stderr\n encoding = getattr(err, \"encoding\", \"utf8\")\n try:\n err = py.io.dupfile(err, encoding=encoding)\n except Exception:\n pass\n self.trace.root.setwriter(err.write)\n self.enable_tracing()\n\n # Config._consider_importhook will set a real object if required.\n self.rewrite_hook = _pytest.assertion.DummyRewriteHook()\n # Used to know when we are importing conftests after the pytest_configure stage\n self._configured = False\n\n def addhooks(self, module_or_class):\n \"\"\"\n .. deprecated:: 2.8\n\n Use :py:meth:`pluggy.PluginManager.add_hookspecs `\n instead.\n \"\"\"\n warnings.warn(deprecated.PLUGIN_MANAGER_ADDHOOKS, stacklevel=2)\n return self.add_hookspecs(module_or_class)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookimpl_opts_PytestPluginManager.parse_hookimpl_opts.return.opts": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookimpl_opts_PytestPluginManager.parse_hookimpl_opts.return.opts", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 259, "end_line": 286, "span_ids": ["PytestPluginManager.parse_hookimpl_opts"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def parse_hookimpl_opts(self, plugin, name):\n # pytest hooks are always prefixed with pytest_\n # so we avoid accessing possibly non-readable attributes\n # (see issue #1073)\n if not name.startswith(\"pytest_\"):\n return\n # ignore names which can not be hooks\n if name == \"pytest_plugins\":\n return\n\n method = getattr(plugin, name)\n opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)\n\n # consider only actual functions for hooks (#3775)\n if not inspect.isroutine(method):\n return\n\n # collect unmarked hooks as long as they have the 
`pytest_' prefix\n if opts is None and name.startswith(\"pytest_\"):\n opts = {}\n if opts is not None:\n # TODO: DeprecationWarning, people should use hookimpl\n # https://github.com/pytest-dev/pytest/issues/4562\n known_marks = {m.name for m in getattr(method, \"pytestmark\", [])}\n\n for name in (\"tryfirst\", \"trylast\", \"optionalhook\", \"hookwrapper\"):\n opts.setdefault(name, hasattr(method, name) or name in known_marks)\n return opts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookspec_opts_PytestPluginManager.parse_hookspec_opts.return.opts": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.parse_hookspec_opts_PytestPluginManager.parse_hookspec_opts.return.opts", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 288, "end_line": 305, "span_ids": ["PytestPluginManager.parse_hookspec_opts"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def parse_hookspec_opts(self, module_or_class, name):\n opts = super(PytestPluginManager, self).parse_hookspec_opts(\n module_or_class, name\n )\n if opts is None:\n method = getattr(module_or_class, name)\n\n if name.startswith(\"pytest_\"):\n # todo: deprecate hookspec hacks\n # https://github.com/pytest-dev/pytest/issues/4562\n known_marks = {m.name for m in getattr(method, \"pytestmark\", [])}\n opts = {\n \"firstresult\": hasattr(method, \"firstresult\")\n or \"firstresult\" in known_marks,\n \"historic\": hasattr(method, \"historic\")\n or \"historic\" in known_marks,\n }\n return opts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.register_PytestPluginManager.register.return.ret": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.register_PytestPluginManager.register.return.ret", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 307, "end_line": 326, "span_ids": ["PytestPluginManager.register"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def register(self, plugin, name=None):\n if name in [\"pytest_catchlog\", \"pytest_capturelog\"]:\n warnings.warn(\n PytestConfigWarning(\n \"{} plugin has been merged into the core, 
\"\n \"please remove it from your requirements.\".format(\n name.replace(\"_\", \"-\")\n )\n )\n )\n return\n ret = super(PytestPluginManager, self).register(plugin, name)\n if ret:\n self.hook.pytest_plugin_registered.call_historic(\n kwargs=dict(plugin=plugin, manager=self)\n )\n\n if isinstance(plugin, types.ModuleType):\n self.consider_module(plugin)\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.getplugin_PytestPluginManager.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.getplugin_PytestPluginManager.None_3", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 353, "span_ids": ["PytestPluginManager.hasplugin", "PytestPluginManager.getplugin", "PytestPluginManager.pytest_configure"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def getplugin(self, name):\n # support deprecated naming because plugins (xdist e.g.) use it\n return self.get_plugin(name)\n\n def hasplugin(self, name):\n \"\"\"Return True if the plugin with the given name is registered.\"\"\"\n return bool(self.get_plugin(name))\n\n def pytest_configure(self, config):\n # XXX now that the pluginmanager exposes hookimpl(tryfirst...)\n # we should remove tryfirst/trylast as markers\n config.addinivalue_line(\n \"markers\",\n \"tryfirst: mark a hook implementation function such that the \"\n \"plugin machinery will try to call it first/as early as possible.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"trylast: mark a hook implementation function such that the \"\n \"plugin machinery will try to call it last/as late as possible.\",\n )\n self._configured = True\n\n #\n # internal API for local conftest plugin handling\n #", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._set_initial_conftests_PytestPluginManager._set_initial_conftests.if_not_foundanchor_.self__try_load_conftest_c": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._set_initial_conftests_PytestPluginManager._set_initial_conftests.if_not_foundanchor_.self__try_load_conftest_c", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 354, "end_line": 383, "span_ids": ["PytestPluginManager._set_initial_conftests"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n def _set_initial_conftests(self, namespace):\n \"\"\" load initial conftest files given a preparsed \"namespace\".\n As conftest files may add their own command line options\n which have arguments ('--my-opt somepath') we might get some\n false positives. All builtin and 3rd party plugins will have\n been loaded, however, so common options will not confuse our logic\n here.\n \"\"\"\n current = py.path.local()\n self._confcutdir = (\n current.join(namespace.confcutdir, abs=True)\n if namespace.confcutdir\n else None\n )\n self._noconftest = namespace.noconftest\n self._using_pyargs = namespace.pyargs\n testpaths = namespace.file_or_dir\n foundanchor = False\n for path in testpaths:\n path = str(path)\n # remove node-id syntax\n i = path.find(\"::\")\n if i != -1:\n path = path[:i]\n anchor = current.join(path, abs=1)\n if exists(anchor): # we found some file object\n self._try_load_conftest(anchor)\n foundanchor = True\n if not foundanchor:\n self._try_load_conftest(current)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._try_load_conftest_PytestPluginManager._getconftestmodules.return.clist": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._try_load_conftest_PytestPluginManager._getconftestmodules.return.clist", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 385, "end_line": 424, "span_ids": ["PytestPluginManager._getconftestmodules", "PytestPluginManager._try_load_conftest"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def _try_load_conftest(self, anchor):\n self._getconftestmodules(anchor)\n # let's also consider test* subdirs\n if anchor.check(dir=1):\n for x in anchor.listdir(\"test*\"):\n if x.check(dir=1):\n self._getconftestmodules(x)\n\n @lru_cache(maxsize=128)\n def _getconftestmodules(self, path):\n if self._noconftest:\n return []\n\n if path.isfile():\n directory = path.dirpath()\n else:\n directory = path\n\n if six.PY2: # py2 is not using lru_cache.\n try:\n return self._dirpath2confmods[directory]\n except KeyError:\n pass\n\n # XXX these days we may rather want to use config.rootdir\n # and allow users to opt into looking into the rootdir parent\n # directories instead of requiring to specify confcutdir\n clist = []\n for parent in directory.realpath().parts():\n if self._confcutdir and self._confcutdir.relto(parent):\n continue\n conftestpath = parent.join(\"conftest.py\")\n if conftestpath.isfile():\n # Use realpath to avoid loading the same conftest twice\n # with build systems that create build directories containing\n # symlinks to actual files.\n mod = self._importconftest(conftestpath.realpath())\n clist.append(mod)\n self._dirpath2confmods[directory] = 
clist\n return clist", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._rget_with_confmod_PytestPluginManager._importconftest.try_.except_KeyError_.return.mod": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager._rget_with_confmod_PytestPluginManager._importconftest.try_.except_KeyError_.return.mod", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 472, "span_ids": ["PytestPluginManager._importconftest", "PytestPluginManager._rget_with_confmod"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def _rget_with_confmod(self, name, path):\n modules = self._getconftestmodules(path)\n for mod in reversed(modules):\n try:\n return mod, getattr(mod, name)\n except AttributeError:\n continue\n raise KeyError(name)\n\n def _importconftest(self, conftestpath):\n try:\n return self._conftestpath2mod[conftestpath]\n except KeyError:\n pkgpath = conftestpath.pypkgpath()\n if pkgpath is None:\n _ensure_removed_sysmodule(conftestpath.purebasename)\n try:\n mod = conftestpath.pyimport()\n if (\n hasattr(mod, \"pytest_plugins\")\n and self._configured\n and not self._using_pyargs\n ):\n from _pytest.deprecated import (\n PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST,\n )\n\n fail(\n PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST.format(\n conftestpath, self._confcutdir\n ),\n pytrace=False,\n )\n except Exception:\n raise ConftestImportFailure(conftestpath, sys.exc_info())\n\n self._conftest_plugins.add(mod)\n self._conftestpath2mod[conftestpath] = mod\n dirpath = conftestpath.dirpath()\n if dirpath in self._dirpath2confmods:\n for path, mods in self._dirpath2confmods.items():\n if path and path.relto(dirpath) or path == dirpath:\n assert mod not in mods\n mods.append(mod)\n self.trace(\"loaded conftestmodule %r\" % (mod))\n self.consider_conftest(mod)\n return mod", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.None_4_PytestPluginManager._import_plugin_specs.for_import_spec_in_plugin.self_import_plugin_import": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.None_4_PytestPluginManager._import_plugin_specs.for_import_spec_in_plugin.self_import_plugin_import", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 474, "end_line": 535, "span_ids": ["PytestPluginManager._importconftest", "PytestPluginManager.consider_module", "PytestPluginManager.consider_pluginarg", "PytestPluginManager.consider_env", 
"PytestPluginManager.consider_conftest", "PytestPluginManager.consider_preparse", "PytestPluginManager._import_plugin_specs"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n #\n # API for bootstrapping plugin loading\n #\n #\n\n def consider_preparse(self, args):\n i = 0\n n = len(args)\n while i < n:\n opt = args[i]\n i += 1\n if isinstance(opt, six.string_types):\n if opt == \"-p\":\n try:\n parg = args[i]\n except IndexError:\n return\n i += 1\n elif opt.startswith(\"-p\"):\n parg = opt[2:]\n else:\n continue\n self.consider_pluginarg(parg)\n\n def consider_pluginarg(self, arg):\n if arg.startswith(\"no:\"):\n name = arg[3:]\n if name in essential_plugins:\n raise UsageError(\"plugin %s cannot be disabled\" % name)\n\n # PR #4304 : remove stepwise if cacheprovider is blocked\n if name == \"cacheprovider\":\n self.set_blocked(\"stepwise\")\n self.set_blocked(\"pytest_stepwise\")\n\n self.set_blocked(name)\n if not name.startswith(\"pytest_\"):\n self.set_blocked(\"pytest_\" + name)\n else:\n name = arg\n # Unblock the plugin. None indicates that it has been blocked.\n # There is no interface with pluggy for this.\n if self._name2plugin.get(name, -1) is None:\n del self._name2plugin[name]\n if not name.startswith(\"pytest_\"):\n if self._name2plugin.get(\"pytest_\" + name, -1) is None:\n del self._name2plugin[\"pytest_\" + name]\n self.import_plugin(arg, consider_entry_points=True)\n\n def consider_conftest(self, conftestmodule):\n self.register(conftestmodule, name=conftestmodule.__file__)\n\n def consider_env(self):\n self._import_plugin_specs(os.environ.get(\"PYTEST_PLUGINS\"))\n\n def consider_module(self, mod):\n self._import_plugin_specs(getattr(mod, \"pytest_plugins\", []))\n\n def _import_plugin_specs(self, spec):\n plugins = _get_plugin_specs_as_list(spec)\n for import_spec in plugins:\n self.import_plugin(import_spec)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.import_plugin_PytestPluginManager.import_plugin.try_.else_.self_register_mod_modnam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_PytestPluginManager.import_plugin_PytestPluginManager.import_plugin.try_.else_.self_register_mod_modnam", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 537, "end_line": 583, "span_ids": ["PytestPluginManager.import_plugin"], "tokens": 395}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestPluginManager(PluginManager):\n\n def import_plugin(self, modname, consider_entry_points=False):\n \"\"\"\n Imports a plugin with ``modname``. 
If ``consider_entry_points`` is True, entry point\n names are also considered to find a plugin.\n \"\"\"\n # most often modname refers to builtin modules, e.g. \"pytester\",\n # \"terminal\" or \"capture\". Those plugins are registered under their\n # basename for historic purposes but must be imported with the\n # _pytest prefix.\n assert isinstance(modname, six.string_types), (\n \"module name as text required, got %r\" % modname\n )\n modname = str(modname)\n if self.is_blocked(modname) or self.get_plugin(modname) is not None:\n return\n\n importspec = \"_pytest.\" + modname if modname in builtin_plugins else modname\n self.rewrite_hook.mark_rewrite(importspec)\n\n if consider_entry_points:\n loaded = self.load_setuptools_entrypoints(\"pytest11\", name=modname)\n if loaded:\n return\n\n try:\n __import__(importspec)\n except ImportError as e:\n new_exc_message = 'Error importing plugin \"%s\": %s' % (\n modname,\n safe_str(e.args[0]),\n )\n new_exc = ImportError(new_exc_message)\n tb = sys.exc_info()[2]\n\n six.reraise(ImportError, new_exc, tb)\n\n except Skipped as e:\n from _pytest.warnings import _issue_warning_captured\n\n _issue_warning_captured(\n PytestConfigWarning(\"skipped plugin %r: %s\" % (modname, e.msg)),\n self.hook,\n stacklevel=1,\n )\n else:\n mod = sys.modules[importspec]\n self.register(mod, modname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__get_plugin_specs_as_list__get_plugin_specs_as_list.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__get_plugin_specs_as_list__get_plugin_specs_as_list.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 586, "end_line": 603, "span_ids": ["_get_plugin_specs_as_list"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_plugin_specs_as_list(specs):\n \"\"\"\n Parses a list of \"plugin specs\" and returns a list of plugin names.\n\n Plugin specs can be given as a list of strings separated by \",\" or already as a list/tuple in\n which case it is returned as a list. Specs can also be `None` in which case an\n empty list is returned.\n \"\"\"\n if specs is not None and not isinstance(specs, types.ModuleType):\n if isinstance(specs, six.string_types):\n specs = specs.split(\",\") if specs else []\n if not isinstance(specs, (list, tuple)):\n raise UsageError(\n \"Plugin specs must be a ','-separated string or a \"\n \"list/tuple of strings for plugin names. 
Given: %r\" % specs\n )\n return list(specs)\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__ensure_removed_sysmodule__iter_rewritable_modules.for_fn_in_package_files_.if_is_simple_module_.elif_is_package_.yield_package_name": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__ensure_removed_sysmodule__iter_rewritable_modules.for_fn_in_package_files_.if_is_simple_module_.elif_is_package_.yield_package_name", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 606, "end_line": 630, "span_ids": ["_iter_rewritable_modules", "impl:12", "_ensure_removed_sysmodule", "Notset", "Notset.__repr__"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ensure_removed_sysmodule(modname):\n try:\n del sys.modules[modname]\n except KeyError:\n pass\n\n\nclass Notset(object):\n def __repr__(self):\n return \"\"\n\n\nnotset = Notset()\n\n\ndef _iter_rewritable_modules(package_files):\n for fn in package_files:\n is_simple_module = \"/\" not in fn and fn.endswith(\".py\")\n is_package = fn.count(\"/\") == 1 and fn.endswith(\"__init__.py\")\n if is_simple_module:\n module_name, _ = os.path.splitext(fn)\n yield module_name\n elif is_package:\n package_name = os.path.dirname(fn)\n yield package_name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config_Config.get_terminal_writer.return.self_pluginmanager_get_pl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config_Config.get_terminal_writer.return.self_pluginmanager_get_pl", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 633, "end_line": 680, "span_ids": ["Config._ensure_unconfigure", "Config.get_terminal_writer", "Config", "Config.add_cleanup", "Config._do_configure"], "tokens": 410}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n \"\"\" access to configuration values, pluginmanager and plugin hooks. 
\"\"\"\n\n def __init__(self, pluginmanager):\n #: access to command line option as attributes.\n #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead\n self.option = argparse.Namespace()\n from .argparsing import Parser, FILE_OR_DIR\n\n _a = FILE_OR_DIR\n self._parser = Parser(\n usage=\"%%(prog)s [options] [%s] [%s] [...]\" % (_a, _a),\n processopt=self._processopt,\n )\n #: a pluginmanager instance\n self.pluginmanager = pluginmanager\n self.trace = self.pluginmanager.trace.root.get(\"config\")\n self.hook = self.pluginmanager.hook\n self._inicache = {}\n self._override_ini = ()\n self._opt2dest = {}\n self._cleanup = []\n self.pluginmanager.register(self, \"pytestconfig\")\n self._configured = False\n self.invocation_dir = py.path.local()\n self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))\n\n def add_cleanup(self, func):\n \"\"\" Add a function to be called when the config object gets out of\n use (usually coninciding with pytest_unconfigure).\"\"\"\n self._cleanup.append(func)\n\n def _do_configure(self):\n assert not self._configured\n self._configured = True\n self.hook.pytest_configure.call_historic(kwargs=dict(config=self))\n\n def _ensure_unconfigure(self):\n if self._configured:\n self._configured = False\n self.hook.pytest_unconfigure(config=self)\n self.hook.pytest_configure._call_history = []\n while self._cleanup:\n fin = self._cleanup.pop()\n fin()\n\n def get_terminal_writer(self):\n return self.pluginmanager.get_plugin(\"terminalreporter\")._tw", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.pytest_cmdline_parse_Config.pytest_cmdline_parse.return.self": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.pytest_cmdline_parse_Config.pytest_cmdline_parse.return.self", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 682, "end_line": 704, "span_ids": ["Config.pytest_cmdline_parse"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def pytest_cmdline_parse(self, pluginmanager, args):\n try:\n self.parse(args)\n except UsageError:\n\n # Handle --version and --help here in a minimal fashion.\n # This gets done via helpconfig normally, but its\n # pytest_cmdline_main is not called in case of errors.\n if getattr(self.option, \"version\", False) or \"--version\" in args:\n from _pytest.helpconfig import showversion\n\n showversion(self)\n elif (\n getattr(self.option, \"help\", False) or \"--help\" in args or \"-h\" in args\n ):\n self._parser._getparser().print_help()\n sys.stdout.write(\n \"\\nNOTE: displaying only minimal help due to UsageError.\\n\\n\"\n )\n\n raise\n\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.notify_exception_Config.notify_exception.if_not_any_res_.for_line_in_str_excrepr_.sys_stderr_flush_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.notify_exception_Config.notify_exception.if_not_any_res_.for_line_in_str_excrepr_.sys_stderr_flush_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 706, "end_line": 718, "span_ids": ["Config.notify_exception"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def notify_exception(self, excinfo, option=None):\n if option and getattr(option, \"fulltrace\", False):\n style = \"long\"\n else:\n style = \"native\"\n excrepr = excinfo.getrepr(\n funcargs=True, showlocals=getattr(option, \"showlocals\", False), style=style\n )\n res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)\n if not any(res):\n for line in str(excrepr).split(\"\\n\"):\n sys.stderr.write(\"INTERNALERROR> %s\\n\" % line)\n sys.stderr.flush()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.cwd_relative_nodeid_Config.pytest_load_initial_conftests.self_pluginmanager__set_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.cwd_relative_nodeid_Config.pytest_load_initial_conftests.self_pluginmanager__set_i", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 720, "end_line": 747, "span_ids": ["Config.cwd_relative_nodeid", "Config.pytest_load_initial_conftests", "Config.fromdictargs", "Config._processopt"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def cwd_relative_nodeid(self, nodeid):\n # nodeid's are relative to the rootpath, compute relative to cwd\n if self.invocation_dir != self.rootdir:\n fullpath = self.rootdir.join(nodeid)\n nodeid = self.invocation_dir.bestrelpath(fullpath)\n return nodeid\n\n @classmethod\n def fromdictargs(cls, option_dict, args):\n \"\"\" constructor useable for subprocesses. 
\"\"\"\n config = get_config(args)\n config.option.__dict__.update(option_dict)\n config.parse(args, addopts=False)\n for x in config.option.plugins:\n config.pluginmanager.consider_pluginarg(x)\n return config\n\n def _processopt(self, opt):\n for name in opt._short_opts + opt._long_opts:\n self._opt2dest[name] = opt.dest\n\n if hasattr(opt, \"default\") and opt.dest:\n if not hasattr(self.option, opt.dest):\n setattr(self.option, opt.dest, opt.default)\n\n @hookimpl(trylast=True)\n def pytest_load_initial_conftests(self, early_config):\n self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._initini_Config._initini.self._override_ini.ns_override_ini_or_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._initini_Config._initini.self._override_ini.ns_override_ini_or_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 749, "end_line": 764, "span_ids": ["Config._initini"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _initini(self, args):\n ns, unknown_args = self._parser.parse_known_and_unknown_args(\n args, namespace=copy.copy(self.option)\n )\n r = determine_setup(\n ns.inifilename,\n ns.file_or_dir + unknown_args,\n rootdir_cmd_arg=ns.rootdir or None,\n config=self,\n )\n self.rootdir, self.inifile, self.inicfg = r\n self._parser.extra_info[\"rootdir\"] = self.rootdir\n self._parser.extra_info[\"inifile\"] = self.inifile\n self._parser.addini(\"addopts\", \"extra command line options\", \"args\")\n self._parser.addini(\"minversion\", \"minimally required pytest version\")\n self._override_ini = ns.override_ini or ()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._consider_importhook_Config._consider_importhook._warn_about_missing_asser": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._consider_importhook_Config._consider_importhook._warn_about_missing_asser", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 766, "end_line": 782, "span_ids": ["Config._consider_importhook"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _consider_importhook(self, args):\n \"\"\"Install the PEP 302 import 
hook if using assertion rewriting.\n\n Needs to parse the --assert= option from the commandline\n and find all the installed plugins to mark them for rewriting\n by the importhook.\n \"\"\"\n ns, unknown_args = self._parser.parse_known_and_unknown_args(args)\n mode = getattr(ns, \"assertmode\", \"plain\")\n if mode == \"rewrite\":\n try:\n hook = _pytest.assertion.install_importhook(self)\n except SystemError:\n mode = \"plain\"\n else:\n self._mark_plugins_for_rewrite(hook)\n _warn_about_missing_assertion(mode)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._mark_plugins_for_rewrite_Config._validate_args.return.args": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._mark_plugins_for_rewrite_Config._validate_args.return.args", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 784, "end_line": 824, "span_ids": ["Config._validate_args", "Config._mark_plugins_for_rewrite"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _mark_plugins_for_rewrite(self, hook):\n \"\"\"\n Given an importhook, mark for rewrite any top-level\n modules or packages in the distribution package for\n all pytest plugins.\n \"\"\"\n import pkg_resources\n\n self.pluginmanager.rewrite_hook = hook\n\n if os.environ.get(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\"):\n # We don't autoload from setuptools entry points, no need to continue.\n return\n\n # 'RECORD' available for plugins installed normally (pip install)\n # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)\n # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa\n # so it shouldn't be an issue\n metadata_files = \"RECORD\", \"SOURCES.txt\"\n\n package_files = (\n entry.split(\",\")[0]\n for entrypoint in pkg_resources.iter_entry_points(\"pytest11\")\n for metadata in metadata_files\n for entry in entrypoint.dist._get_metadata(metadata)\n )\n\n for name in _iter_rewritable_modules(package_files):\n hook.mark_rewrite(name)\n\n def _validate_args(self, args, via):\n \"\"\"Validate known args.\"\"\"\n self._parser._config_source_hint = via\n try:\n self._parser.parse_known_and_unknown_args(\n args, namespace=copy.copy(self.option)\n )\n finally:\n del self._parser._config_source_hint\n\n return args", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._preparse_Config._preparse.try_.except_ConftestImportFail.if_ns_help_or_ns_version_.else_.raise": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._preparse_Config._preparse.try_.except_ConftestImportFail.if_ns_help_or_ns_version_.else_.raise", "embedding": null, "metadata": {"file_path": 
"src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 826, "end_line": 873, "span_ids": ["Config._preparse"], "tokens": 422}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _preparse(self, args, addopts=True):\n if addopts:\n env_addopts = os.environ.get(\"PYTEST_ADDOPTS\", \"\")\n if len(env_addopts):\n args[:] = (\n self._validate_args(shlex.split(env_addopts), \"via PYTEST_ADDOPTS\")\n + args\n )\n self._initini(args)\n if addopts:\n args[:] = (\n self._validate_args(self.getini(\"addopts\"), \"via addopts config\") + args\n )\n\n self._checkversion()\n self._consider_importhook(args)\n self.pluginmanager.consider_preparse(args)\n if not os.environ.get(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\"):\n # Don't autoload from setuptools entry point. Only explicitly specified\n # plugins are going to be loaded.\n self.pluginmanager.load_setuptools_entrypoints(\"pytest11\")\n self.pluginmanager.consider_env()\n self.known_args_namespace = ns = self._parser.parse_known_args(\n args, namespace=copy.copy(self.option)\n )\n if self.known_args_namespace.confcutdir is None and self.inifile:\n confcutdir = py.path.local(self.inifile).dirname\n self.known_args_namespace.confcutdir = confcutdir\n try:\n self.hook.pytest_load_initial_conftests(\n early_config=self, args=args, parser=self._parser\n )\n except ConftestImportFailure:\n e = sys.exc_info()[1]\n if ns.help or ns.version:\n # we don't want to prevent --help/--version to work\n # so just let is pass and print a warning at the end\n from _pytest.warnings import _issue_warning_captured\n\n _issue_warning_captured(\n PytestConfigWarning(\n \"could not load initial conftests: {}\".format(e.path)\n ),\n self.hook,\n stacklevel=2,\n )\n else:\n raise", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._checkversion_Config._checkversion.if_minver_.if_parse_version_minver_.raise_pytest_UsageError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._checkversion_Config._checkversion.if_minver_.if_parse_version_minver_.raise_pytest_UsageError_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 875, "end_line": 890, "span_ids": ["Config._checkversion"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _checkversion(self):\n import pytest\n from pkg_resources import parse_version\n\n minver = self.inicfg.get(\"minversion\", None)\n if minver:\n if parse_version(minver) > parse_version(pytest.__version__):\n raise pytest.UsageError(\n \"%s:%d: requires pytest-%s, actual 
pytest-%s'\"\n % (\n self.inicfg.config.path,\n self.inicfg.lineof(\"minversion\"),\n minver,\n pytest.__version__,\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.parse_Config.parse.try_.except_PrintHelp_.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.parse_Config.parse.try_.except_PrintHelp_.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 892, "end_line": 916, "span_ids": ["Config.parse"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def parse(self, args, addopts=True):\n # parse given cmdline arguments into this config object.\n assert not hasattr(\n self, \"args\"\n ), \"can only parse cmdline args at most once per Config object\"\n self._origargs = args\n self.hook.pytest_addhooks.call_historic(\n kwargs=dict(pluginmanager=self.pluginmanager)\n )\n self._preparse(args, addopts=addopts)\n # XXX deprecated hook:\n self.hook.pytest_cmdline_preparse(config=self, args=args)\n self._parser.after_preparse = True\n try:\n args = self._parser.parse_setoption(\n args, self.option, namespace=self.option\n )\n if not args:\n if self.invocation_dir == self.rootdir:\n args = self.getini(\"testpaths\")\n if not args:\n args = [str(self.invocation_dir)]\n self.args = args\n except PrintHelp:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.addinivalue_line_Config.getini.try_.except_KeyError_.return.val": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.addinivalue_line_Config.getini.try_.except_KeyError_.return.val", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 918, "end_line": 935, "span_ids": ["Config.getini", "Config.addinivalue_line"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def addinivalue_line(self, name, line):\n \"\"\" add a line to an ini-file option. The option must have been\n declared but might not yet be set in which case the line becomes the\n the first line in its value. \"\"\"\n x = self.getini(name)\n assert isinstance(x, list)\n x.append(line) # modifies the cached list inline\n\n def getini(self, name):\n \"\"\" return configuration value from an :ref:`ini file `. 
If the\n specified name hasn't been registered through a prior\n :py:func:`parser.addini <_pytest.config.Parser.addini>`\n call (usually from a plugin), a ValueError is raised. \"\"\"\n try:\n return self._inicache[name]\n except KeyError:\n self._inicache[name] = val = self._getini(name)\n return val", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getini_Config._getini.if_type_pathlist_.else_.return.value": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getini_Config._getini.if_type_pathlist_.else_.return.value", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 937, "end_line": 966, "span_ids": ["Config._getini"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _getini(self, name):\n try:\n description, type, default = self._parser._inidict[name]\n except KeyError:\n raise ValueError(\"unknown configuration value: %r\" % (name,))\n value = self._get_override_ini_value(name)\n if value is None:\n try:\n value = self.inicfg[name]\n except KeyError:\n if default is not None:\n return default\n if type is None:\n return \"\"\n return []\n if type == \"pathlist\":\n dp = py.path.local(self.inicfg.config.path).dirpath()\n values = []\n for relpath in shlex.split(value):\n values.append(dp.join(relpath, abs=True))\n return values\n elif type == \"args\":\n return shlex.split(value)\n elif type == \"linelist\":\n return [t for t in map(lambda x: x.strip(), value.split(\"\\n\")) if t]\n elif type == \"bool\":\n return bool(_strtobool(value.strip()))\n else:\n assert type is None\n return value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getconftest_pathlist_Config._getconftest_pathlist.return.values": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._getconftest_pathlist_Config._getconftest_pathlist.return.values", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 968, "end_line": 980, "span_ids": ["Config._getconftest_pathlist"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _getconftest_pathlist(self, name, path):\n try:\n mod, relroots = self.pluginmanager._rget_with_confmod(name, path)\n except KeyError:\n return None\n modpath = 
py.path.local(mod.__file__).dirpath()\n values = []\n for relroot in relroots:\n if not isinstance(relroot, py.path.local):\n relroot = relroot.replace(\"/\", py.path.local.sep)\n relroot = modpath.join(relroot, abs=True)\n values.append(relroot)\n return values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._get_override_ini_value_Config._get_override_ini_value.return.value": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config._get_override_ini_value_Config._get_override_ini_value.return.value", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 982, "end_line": 995, "span_ids": ["Config._get_override_ini_value"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def _get_override_ini_value(self, name):\n value = None\n # override_ini is a list of \"ini=value\" options\n # always use the last item if multiple values are set for same ini-name,\n # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2\n for ini_config in self._override_ini:\n try:\n key, user_ini_value = ini_config.split(\"=\", 1)\n except ValueError:\n raise UsageError(\"-o/--override-ini expects option=value style.\")\n else:\n if key == name:\n value = user_ini_value\n return value", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.getoption_Config.getvalueorskip.return.self_getoption_name_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_Config.getoption_Config.getvalueorskip.return.self_getoption_name_skip", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 997, "end_line": 1027, "span_ids": ["Config.getvalue", "Config.getoption", "Config.getvalueorskip"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Config(object):\n\n def getoption(self, name, default=notset, skip=False):\n \"\"\" return command line option value.\n\n :arg name: name of the option. 
You may also specify\n the literal ``--OPT`` option instead of the \"dest\" option name.\n :arg default: default value if no option of that name exists.\n :arg skip: if True raise pytest.skip if option does not exists\n or has a None value.\n \"\"\"\n name = self._opt2dest.get(name, name)\n try:\n val = getattr(self.option, name)\n if val is None and skip:\n raise AttributeError(name)\n return val\n except AttributeError:\n if default is not notset:\n return default\n if skip:\n import pytest\n\n pytest.skip(\"no %r option found\" % (name,))\n raise ValueError(\"no option named %r\" % (name,))\n\n def getvalue(self, name, path=None):\n \"\"\" (deprecated, use getoption()) \"\"\"\n return self.getoption(name)\n\n def getvalueorskip(self, name, path=None):\n \"\"\" (deprecated, use getoption(skip=True)) \"\"\"\n return self.getoption(name, skip=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__assertion_supported__warn_about_missing_assertion.if_not__assertion_support.if_mode_plain_.else_.sys_stderr_write_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__assertion_supported__warn_about_missing_assertion.if_not__assertion_support.if_mode_plain_.else_.sys_stderr_write_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1030, "end_line": 1054, "span_ids": ["_warn_about_missing_assertion", "_assertion_supported"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _assertion_supported():\n try:\n assert False\n except AssertionError:\n return True\n else:\n return False\n\n\ndef _warn_about_missing_assertion(mode):\n if not _assertion_supported():\n if mode == \"plain\":\n sys.stderr.write(\n \"WARNING: ASSERTIONS ARE NOT EXECUTED\"\n \" and FAILING TESTS WILL PASS. 
Are you\"\n \" using python -O?\"\n )\n else:\n sys.stderr.write(\n \"WARNING: assertions not in test modules or\"\n \" plugins will be ignored\"\n \" because assert statements are not executed \"\n \"by the underlying Python interpreter \"\n \"(are you using python -O?)\\n\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_setns_create_terminal_writer.return.tw": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py_setns_create_terminal_writer.return.tw", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1057, "end_line": 1089, "span_ids": ["setns", "create_terminal_writer"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def setns(obj, dic):\n import pytest\n\n for name, value in dic.items():\n if isinstance(value, dict):\n mod = getattr(obj, name, None)\n if mod is None:\n modname = \"pytest.%s\" % name\n mod = types.ModuleType(modname)\n sys.modules[modname] = mod\n mod.__all__ = []\n setattr(obj, name, mod)\n obj.__all__.append(name)\n setns(mod, value)\n else:\n setattr(obj, name, value)\n obj.__all__.append(name)\n # if obj != pytest:\n # pytest.__all__.append(name)\n setattr(pytest, name, value)\n\n\ndef create_terminal_writer(config, *args, **kwargs):\n \"\"\"Create a TerminalWriter instance configured according to the options\n in the config object. Every code which requires a TerminalWriter object\n and has access to a config object should use this function.\n \"\"\"\n tw = py.io.TerminalWriter(*args, **kwargs)\n if config.option.color == \"yes\":\n tw.hasmarkup = True\n if config.option.color == \"no\":\n tw.hasmarkup = False\n return tw", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__strtobool_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/__init__.py__strtobool_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1092, "end_line": 1108, "span_ids": ["_strtobool"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _strtobool(val):\n \"\"\"Convert a string representation of truth to true (1) or false (0).\n\n True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values\n are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if\n 'val' is anything else.\n\n .. 
note:: copied from distutils.util\n \"\"\"\n val = val.lower()\n if val in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif val in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(\"invalid truth value %r\" % (val,))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_argparse_Parser.processoption.if_self__processopt_.if_option_dest_.self__processopt_option_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_argparse_Parser.processoption.if_self__processopt_.if_option_dest_.self__processopt_option_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["Parser.processoption", "imports", "Parser", "impl"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import argparse\nimport warnings\n\nimport py\nimport six\n\nfrom _pytest.config.exceptions import UsageError\n\nFILE_OR_DIR = \"file_or_dir\"\n\n\nclass Parser(object):\n \"\"\" Parser for command line arguments and ini-file values.\n\n :ivar extra_info: dict of generic param -> value to display in case\n there's an error processing the command line arguments.\n \"\"\"\n\n prog = None\n\n def __init__(self, usage=None, processopt=None):\n self._anonymous = OptionGroup(\"custom options\", parser=self)\n self._groups = []\n self._processopt = processopt\n self._usage = usage\n self._inidict = {}\n self._ininames = []\n self.extra_info = {}\n\n def processoption(self, option):\n if self._processopt:\n if option.dest:\n self._processopt(option)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.getgroup_Parser.getgroup.return.group": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.getgroup_Parser.getgroup.return.group", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 35, "end_line": 56, "span_ids": ["Parser.getgroup"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Parser(object):\n\n def getgroup(self, name, description=\"\", after=None):\n \"\"\" get (or create) a named option Group.\n\n :name: name of the option group.\n :description: long description for --help output.\n :after: name of other group, used for ordering --help output.\n\n The returned group object has an ``addoption`` 
method with the same\n signature as :py:func:`parser.addoption\n <_pytest.config.Parser.addoption>` but will be shown in the\n respective group in the output of ``pytest. --help``.\n \"\"\"\n for group in self._groups:\n if group.name == name:\n return group\n group = OptionGroup(name, description, parser=self)\n i = 0\n for i, grp in enumerate(self._groups):\n if grp.name == after:\n break\n self._groups.insert(i + 1, group)\n return group", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addoption_Parser.addoption.self__anonymous_addoption": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addoption_Parser.addoption.self__anonymous_addoption", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 58, "end_line": 72, "span_ids": ["Parser.addoption"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Parser(object):\n\n def addoption(self, *opts, **attrs):\n \"\"\" register a command line option.\n\n :opts: option names, can be short or long options.\n :attrs: same attributes which the ``add_option()`` function of the\n `argparse library\n `_\n accepts.\n\n After command line parsing options are available on the pytest config\n object via ``config.option.NAME`` where ``NAME`` is usually set\n by passing a ``dest`` attribute, for example\n ``addoption(\"--long\", dest=\"NAME\", ...)``.\n \"\"\"\n self._anonymous.addoption(*opts, **attrs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_Parser._getparser.return.optparser": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_Parser._getparser.return.optparser", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 74, "end_line": 97, "span_ids": ["Parser.parse", "Parser._getparser"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Parser(object):\n\n def parse(self, args, namespace=None):\n from _pytest._argcomplete import try_argcomplete\n\n self.optparser = self._getparser()\n try_argcomplete(self.optparser)\n args = [str(x) if isinstance(x, py.path.local) else x for x in args]\n return self.optparser.parse_args(args, namespace=namespace)\n\n def _getparser(self):\n from _pytest._argcomplete import filescompleter\n\n optparser = MyOptionParser(self, 
self.extra_info, prog=self.prog)\n groups = self._groups + [self._anonymous]\n for group in groups:\n if group.options:\n desc = group.description or group.name\n arggroup = optparser.add_argument_group(desc)\n for option in group.options:\n n = option.names()\n a = option.attrs()\n arggroup.add_argument(*n, **a)\n # bash like autocompletion for dirs (appending '/')\n optparser.add_argument(FILE_OR_DIR, nargs=\"*\").completer = filescompleter\n return optparser", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_setoption_Parser.parse_known_and_unknown_args.return.optparser_parse_known_arg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.parse_setoption_Parser.parse_known_and_unknown_args.return.optparser_parse_known_arg", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 99, "end_line": 117, "span_ids": ["Parser.parse_known_and_unknown_args", "Parser.parse_known_args", "Parser.parse_setoption"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Parser(object):\n\n def parse_setoption(self, args, option, namespace=None):\n parsedoption = self.parse(args, namespace=namespace)\n for name, value in parsedoption.__dict__.items():\n setattr(option, name, value)\n return getattr(parsedoption, FILE_OR_DIR)\n\n def parse_known_args(self, args, namespace=None):\n \"\"\"parses and returns a namespace object with known arguments at this\n point.\n \"\"\"\n return self.parse_known_and_unknown_args(args, namespace=namespace)[0]\n\n def parse_known_and_unknown_args(self, args, namespace=None):\n \"\"\"parses and returns a namespace object with known arguments, and\n the remaining arguments unknown at this point.\n \"\"\"\n optparser = self._getparser()\n args = [str(x) if isinstance(x, py.path.local) else x for x in args]\n return optparser.parse_known_args(args, namespace=namespace)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addini_ArgumentError.__str__.if_self_option_id_.else_.return.self_msg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Parser.addini_ArgumentError.__str__.if_self_option_id_.else_.return.self_msg", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 119, "end_line": 149, "span_ids": ["Parser.addini", "ArgumentError", "ArgumentError.__str__"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Parser(object):\n\n def addini(self, name, help, type=None, default=None):\n \"\"\" register an ini-file option.\n\n :name: name of the ini-variable\n :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``\n or ``bool``.\n :default: default value if no ini-file option exists but is queried.\n\n The value of ini-variables can be retrieved via a call to\n :py:func:`config.getini(name) <_pytest.config.Config.getini>`.\n \"\"\"\n assert type in (None, \"pathlist\", \"args\", \"linelist\", \"bool\")\n self._inidict[name] = (help, type, default)\n self._ininames.append(name)\n\n\nclass ArgumentError(Exception):\n \"\"\"\n Raised if an Argument instance is created with invalid or\n inconsistent arguments.\n \"\"\"\n\n def __init__(self, msg, option):\n self.msg = msg\n self.option_id = str(option)\n\n def __str__(self):\n if self.option_id:\n return \"option %s: %s\" % (self.option_id, self.msg)\n else:\n return self.msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument_Argument.names.return.self__short_opts_self__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument_Argument.names.return.self__short_opts_self__", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 223, "span_ids": ["Argument", "Argument.names"], "tokens": 594}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Argument(object):\n \"\"\"class that mimics the necessary behaviour of optparse.Option\n\n it's currently a least effort implementation\n and ignoring choices and integer prefixes\n https://docs.python.org/3/library/optparse.html#optparse-standard-option-types\n \"\"\"\n\n _typ_map = {\"int\": int, \"string\": str, \"float\": float, \"complex\": complex}\n\n def __init__(self, *names, **attrs):\n \"\"\"store parms in private vars for use in add_argument\"\"\"\n self._attrs = attrs\n self._short_opts = []\n self._long_opts = []\n self.dest = attrs.get(\"dest\")\n if \"%default\" in (attrs.get(\"help\") or \"\"):\n warnings.warn(\n 'pytest now uses argparse. 
\"%default\" should be'\n ' changed to \"%(default)s\" ',\n DeprecationWarning,\n stacklevel=3,\n )\n try:\n typ = attrs[\"type\"]\n except KeyError:\n pass\n else:\n # this might raise a keyerror as well, don't want to catch that\n if isinstance(typ, six.string_types):\n if typ == \"choice\":\n warnings.warn(\n \"`type` argument to addoption() is the string %r.\"\n \" For choices this is optional and can be omitted, \"\n \" but when supplied should be a type (for example `str` or `int`).\"\n \" (options: %s)\" % (typ, names),\n DeprecationWarning,\n stacklevel=4,\n )\n # argparse expects a type here take it from\n # the type of the first element\n attrs[\"type\"] = type(attrs[\"choices\"][0])\n else:\n warnings.warn(\n \"`type` argument to addoption() is the string %r, \"\n \" but when supplied should be a type (for example `str` or `int`).\"\n \" (options: %s)\" % (typ, names),\n DeprecationWarning,\n stacklevel=4,\n )\n attrs[\"type\"] = Argument._typ_map[typ]\n # used in test_parseopt -> test_parse_defaultgetter\n self.type = attrs[\"type\"]\n else:\n self.type = typ\n try:\n # attribute existence is tested in Config._processopt\n self.default = attrs[\"default\"]\n except KeyError:\n pass\n self._set_opt_strings(names)\n if not self.dest:\n if self._long_opts:\n self.dest = self._long_opts[0][2:].replace(\"-\", \"_\")\n else:\n try:\n self.dest = self._short_opts[0][1:]\n except IndexError:\n raise ArgumentError(\"need a long or short option\", self)\n\n def names(self):\n return self._short_opts + self._long_opts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.attrs_Argument.attrs.return.self__attrs": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.attrs_Argument.attrs.return.self__attrs", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 225, "end_line": 240, "span_ids": ["Argument.attrs"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Argument(object):\n\n def attrs(self):\n # update any attributes set by processopt\n attrs = \"default dest help\".split()\n if self.dest:\n attrs.append(self.dest)\n for attr in attrs:\n try:\n self._attrs[attr] = getattr(self, attr)\n except AttributeError:\n pass\n if self._attrs.get(\"help\"):\n a = self._attrs[\"help\"]\n a = a.replace(\"%default\", \"%(default)s\")\n # a = a.replace('%prog', '%(prog)s')\n self._attrs[\"help\"] = a\n return self._attrs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument._set_opt_strings_Argument._set_opt_strings.for_opt_in_opts_.if_len_opt_2_.else_.self__long_opts_append_op": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument._set_opt_strings_Argument._set_opt_strings.for_opt_in_opts_.if_len_opt_2_.else_.self__long_opts_append_op", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 242, "end_line": 268, "span_ids": ["Argument._set_opt_strings"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Argument(object):\n\n def _set_opt_strings(self, opts):\n \"\"\"directly from optparse\n\n might not be necessary as this is passed to argparse later on\"\"\"\n for opt in opts:\n if len(opt) < 2:\n raise ArgumentError(\n \"invalid option string %r: \"\n \"must be at least two characters long\" % opt,\n self,\n )\n elif len(opt) == 2:\n if not (opt[0] == \"-\" and opt[1] != \"-\"):\n raise ArgumentError(\n \"invalid short option string %r: \"\n \"must be of the form -x, (x any non-dash char)\" % opt,\n self,\n )\n self._short_opts.append(opt)\n else:\n if not (opt[0:2] == \"--\" and opt[2] != \"-\"):\n raise ArgumentError(\n \"invalid long option string %r: \"\n \"must start with --, followed by non-dash\" % opt,\n self,\n )\n self._long_opts.append(opt)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.__repr___Argument.__repr__.return._Argument_format_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_Argument.__repr___Argument.__repr__.return._Argument_format_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 270, "end_line": 281, "span_ids": ["Argument.__repr__"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Argument(object):\n\n def __repr__(self):\n args = []\n if self._short_opts:\n args += [\"_short_opts: \" + repr(self._short_opts)]\n if self._long_opts:\n args += [\"_long_opts: \" + repr(self._long_opts)]\n args += [\"dest: \" + repr(self.dest)]\n if hasattr(self, \"type\"):\n args += [\"type: \" + repr(self.type)]\n if hasattr(self, \"default\"):\n args += [\"default: \" + repr(self.default)]\n return \"Argument({})\".format(\", \".join(args))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_OptionGroup_OptionGroup._addoption_instance.self_options_append_optio": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_OptionGroup_OptionGroup._addoption_instance.self_options_append_optio", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 318, "span_ids": ["OptionGroup._addoption", "OptionGroup", "OptionGroup.addoption", "OptionGroup._addoption_instance"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class OptionGroup(object):\n def __init__(self, name, description=\"\", parser=None):\n self.name = name\n self.description = description\n self.options = []\n self.parser = parser\n\n def addoption(self, *optnames, **attrs):\n \"\"\" add an option to this group.\n\n if a shortened version of a long option is specified it will\n be suppressed in the help. addoption('--twowords', '--two-words')\n results in help showing '--two-words' only, but --twowords gets\n accepted **and** the automatic destination is in args.twowords\n \"\"\"\n conflict = set(optnames).intersection(\n name for opt in self.options for name in opt.names()\n )\n if conflict:\n raise ValueError(\"option names %s already added\" % conflict)\n option = Argument(*optnames, **attrs)\n self._addoption_instance(option, shortupper=False)\n\n def _addoption(self, *optnames, **attrs):\n option = Argument(*optnames, **attrs)\n self._addoption_instance(option, shortupper=True)\n\n def _addoption_instance(self, option, shortupper=False):\n if not shortupper:\n for opt in option._short_opts:\n if opt[0] == \"-\" and opt[1].islower():\n raise ValueError(\"lowercase shortoptions reserved\")\n if self.parser:\n self.parser.processoption(option)\n self.options.append(option)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser_MyOptionParser.error.raise_UsageError_self_for": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser_MyOptionParser.error.raise_UsageError_self_for", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 321, "end_line": 344, "span_ids": ["MyOptionParser.error", "MyOptionParser"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MyOptionParser(argparse.ArgumentParser):\n def __init__(self, parser, extra_info=None, prog=None):\n if not extra_info:\n extra_info = {}\n self._parser = parser\n argparse.ArgumentParser.__init__(\n self,\n prog=prog,\n usage=parser._usage,\n add_help=False,\n formatter_class=DropShorterLongHelpFormatter,\n )\n # extra_info is a dict of (param -> value) to display if there's\n # an usage 
error to provide more contextual information to the user\n self.extra_info = extra_info\n\n def error(self, message):\n \"\"\"Transform argparse error message into UsageError.\"\"\"\n msg = \"%s: error: %s\" % (self.prog, message)\n\n if hasattr(self._parser, \"_config_source_hint\"):\n msg = \"%s (%s)\" % (msg, self._parser._config_source_hint)\n\n raise UsageError(self.format_usage() + msg)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser.parse_args_MyOptionParser.parse_args.return.args": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_MyOptionParser.parse_args_MyOptionParser.parse_args.return.args", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 357, "span_ids": ["MyOptionParser.parse_args"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MyOptionParser(argparse.ArgumentParser):\n\n def parse_args(self, args=None, namespace=None):\n \"\"\"allow splitting of positional arguments\"\"\"\n args, argv = self.parse_known_args(args, namespace)\n if argv:\n for arg in argv:\n if arg and arg[0] == \"-\":\n lines = [\"unrecognized arguments: %s\" % (\" \".join(argv))]\n for k, v in sorted(self.extra_info.items()):\n lines.append(\" %s: %s\" % (k, v))\n self.error(\"\\n\".join(lines))\n getattr(args, FILE_OR_DIR).extend(argv)\n return args", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_DropShorterLongHelpFormatter_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/argparsing.py_DropShorterLongHelpFormatter_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/argparsing.py", "file_name": "argparsing.py", "file_type": "text/x-python", "category": "implementation", "start_line": 360, "end_line": 410, "span_ids": ["DropShorterLongHelpFormatter", "DropShorterLongHelpFormatter._format_action_invocation"], "tokens": 508}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DropShorterLongHelpFormatter(argparse.HelpFormatter):\n \"\"\"shorten help for long options that differ only in extra hyphens\n\n - collapse **long** options that are the same except for extra hyphens\n - special action attribute map_long_option allows surpressing additional\n long options\n - shortcut if there are only two options and one of them is a short one\n - cache result on action object as this is called at least 2 times\n \"\"\"\n\n def _format_action_invocation(self, 
action):\n orgstr = argparse.HelpFormatter._format_action_invocation(self, action)\n if orgstr and orgstr[0] != \"-\": # only optional arguments\n return orgstr\n res = getattr(action, \"_formatted_action_invocation\", None)\n if res:\n return res\n options = orgstr.split(\", \")\n if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):\n # a shortcut for '-h, --help' or '--abc', '-a'\n action._formatted_action_invocation = orgstr\n return orgstr\n return_list = []\n option_map = getattr(action, \"map_long_option\", {})\n if option_map is None:\n option_map = {}\n short_long = {}\n for option in options:\n if len(option) == 2 or option[2] == \" \":\n continue\n if not option.startswith(\"--\"):\n raise ArgumentError(\n 'long optional argument without \"--\": [%s]' % (option), self\n )\n xxoption = option[2:]\n if xxoption.split()[0] not in option_map:\n shortened = xxoption.replace(\"-\", \"\")\n if shortened not in short_long or len(short_long[shortened]) < len(\n xxoption\n ):\n short_long[shortened] = xxoption\n # now short_long has been filled out to the longest with dashes\n # **and** we keep the right option ordering from add_argument\n for option in options:\n if len(option) == 2 or option[2] == \" \":\n return_list.append(option)\n if option[2:] == short_long.get(option.replace(\"-\", \"\")):\n return_list.append(option.replace(\" \", \"=\", 1))\n action._formatted_action_invocation = \", \".join(return_list)\n return action._formatted_action_invocation", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/exceptions.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/exceptions.py__", "embedding": null, "metadata": {"file_path": "src/_pytest/config/exceptions.py", "file_name": "exceptions.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 10, "span_ids": ["PrintHelp", "UsageError"], "tokens": 44}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class UsageError(Exception):\n \"\"\" error in pytest usage or invocation\"\"\"\n\n\nclass PrintHelp(Exception):\n \"\"\"Raised when pytest should print it's help to skip the rest of the\n argument parsing and validation.\"\"\"\n\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_os_getcfg.return.None_None_None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_os_getcfg.return.None_None_None", "embedding": null, "metadata": {"file_path": "src/_pytest/config/findpaths.py", "file_name": "findpaths.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 52, "span_ids": ["imports", "getcfg", "exists"], "tokens": 362}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\n\nimport py\n\nfrom .exceptions import UsageError\nfrom _pytest.outcomes import fail\n\n\ndef exists(path, ignore=EnvironmentError):\n try:\n return path.check()\n except ignore:\n return False\n\n\ndef getcfg(args, config=None):\n \"\"\"\n Search the list of arguments for a valid ini-file for pytest,\n and return a tuple of (rootdir, inifile, cfg-dict).\n\n note: config is optional and used only to issue warnings explicitly (#2891).\n \"\"\"\n from _pytest.deprecated import CFG_PYTEST_SECTION\n\n inibasenames = [\"pytest.ini\", \"tox.ini\", \"setup.cfg\"]\n args = [x for x in args if not str(x).startswith(\"-\")]\n if not args:\n args = [py.path.local()]\n for arg in args:\n arg = py.path.local(arg)\n for base in arg.parts(reverse=True):\n for inibasename in inibasenames:\n p = base.join(inibasename)\n if exists(p):\n iniconfig = py.iniconfig.IniConfig(p)\n if (\n inibasename == \"setup.cfg\"\n and \"tool:pytest\" in iniconfig.sections\n ):\n return base, p, iniconfig[\"tool:pytest\"]\n elif \"pytest\" in iniconfig.sections:\n if inibasename == \"setup.cfg\" and config is not None:\n\n fail(\n CFG_PYTEST_SECTION.format(filename=inibasename),\n pytrace=False,\n )\n return base, p, iniconfig[\"pytest\"]\n elif inibasename == \"pytest.ini\":\n # allowed to be empty\n return base, p, {}\n return None, None, None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_common_ancestor_get_common_ancestor.return.common_ancestor": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_common_ancestor_get_common_ancestor.return.common_ancestor", "embedding": null, "metadata": {"file_path": "src/_pytest/config/findpaths.py", "file_name": "findpaths.py", "file_type": "text/x-python", "category": "implementation", "start_line": 55, "end_line": 75, "span_ids": ["get_common_ancestor"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_common_ancestor(paths):\n common_ancestor = None\n for path in paths:\n if not path.exists():\n continue\n if common_ancestor is None:\n common_ancestor = path\n else:\n if path.relto(common_ancestor) or path == common_ancestor:\n continue\n elif common_ancestor.relto(path):\n common_ancestor = path\n else:\n shared = path.common(common_ancestor)\n if shared is not None:\n common_ancestor = shared\n if common_ancestor is None:\n common_ancestor = py.path.local()\n elif common_ancestor.isfile():\n common_ancestor = common_ancestor.dirpath()\n return common_ancestor", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_dirs_from_args_get_dirs_from_args.return._get_dir_from_path_path_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_get_dirs_from_args_get_dirs_from_args.return._get_dir_from_path_path_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/findpaths.py", "file_name": "findpaths.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 97, "span_ids": ["get_dirs_from_args"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_dirs_from_args(args):\n def is_option(x):\n return str(x).startswith(\"-\")\n\n def get_file_part_from_node_id(x):\n return str(x).split(\"::\")[0]\n\n def get_dir_from_path(path):\n if path.isdir():\n return path\n return py.path.local(path.dirname)\n\n # These look like paths but may not exist\n possible_paths = (\n py.path.local(get_file_part_from_node_id(arg))\n for arg in args\n if not is_option(arg)\n )\n\n return [get_dir_from_path(path) for path in possible_paths if path.exists()]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_determine_setup_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/config/findpaths.py_determine_setup_", "embedding": null, "metadata": {"file_path": "src/_pytest/config/findpaths.py", "file_name": "findpaths.py", "file_type": "text/x-python", "category": "implementation", "start_line": 100, "end_line": 149, "span_ids": ["determine_setup"], "tokens": 445}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None):\n dirs = get_dirs_from_args(args)\n if inifile:\n iniconfig = py.iniconfig.IniConfig(inifile)\n is_cfg_file = str(inifile).endswith(\".cfg\")\n sections = [\"tool:pytest\", \"pytest\"] if is_cfg_file else [\"pytest\"]\n for section in sections:\n try:\n inicfg = iniconfig[section]\n if is_cfg_file and section == \"pytest\" and config is not None:\n from _pytest.deprecated import CFG_PYTEST_SECTION\n\n fail(\n CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False\n )\n break\n except KeyError:\n inicfg = None\n if rootdir_cmd_arg is None:\n rootdir = get_common_ancestor(dirs)\n else:\n ancestor = get_common_ancestor(dirs)\n rootdir, inifile, inicfg = getcfg([ancestor], config=config)\n if rootdir is None and rootdir_cmd_arg is None:\n for possible_rootdir in ancestor.parts(reverse=True):\n if possible_rootdir.join(\"setup.py\").exists():\n rootdir = possible_rootdir\n break\n else:\n if dirs != [ancestor]:\n rootdir, inifile, inicfg = getcfg(dirs, config=config)\n if rootdir is None:\n if config is not None:\n cwd = config.invocation_dir\n else:\n cwd = py.path.local()\n rootdir = get_common_ancestor([cwd, ancestor])\n is_fs_root = os.path.splitdrive(str(rootdir))[1] == \"/\"\n if is_fs_root:\n rootdir = ancestor\n if rootdir_cmd_arg:\n rootdir = 
py.path.local(os.path.expandvars(rootdir_cmd_arg))\n if not rootdir.isdir():\n raise UsageError(\n \"Directory '{}' not found. Check your '--rootdir' option.\".format(\n rootdir\n )\n )\n return rootdir, inifile, inicfg or {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__interactive_debugging__validate_usepdb_cls.return._modname_classname_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__interactive_debugging__validate_usepdb_cls.return._modname_classname_", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["imports", "_validate_usepdb_cls", "docstring"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" interactive debugging with PDB, the Python Debugger. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport pdb\nimport sys\nfrom doctest import UnexpectedException\n\nfrom _pytest import outcomes\nfrom _pytest.config import hookimpl\nfrom _pytest.config.exceptions import UsageError\n\n\ndef _validate_usepdb_cls(value):\n \"\"\"Validate syntax of --pdbcls option.\"\"\"\n try:\n modname, classname = value.split(\":\")\n except ValueError:\n raise argparse.ArgumentTypeError(\n \"{!r} is not in the format 'modname:classname'\".format(value)\n )\n return (modname, classname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_addoption_pytest_addoption.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_addoption_pytest_addoption.None_2", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 27, "end_line": 48, "span_ids": ["pytest_addoption"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group._addoption(\n \"--pdb\",\n dest=\"usepdb\",\n action=\"store_true\",\n help=\"start the interactive Python debugger on errors or KeyboardInterrupt.\",\n )\n group._addoption(\n \"--pdbcls\",\n dest=\"usepdb_cls\",\n metavar=\"modulename:classname\",\n type=_validate_usepdb_cls,\n help=\"start a custom interactive Python debugger on errors. 
\"\n \"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb\",\n )\n group._addoption(\n \"--trace\",\n dest=\"trace\",\n action=\"store_true\",\n help=\"Immediately break when running each test.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__import_pdbcls__import_pdbcls.try_.except_Exception_as_exc_.raise_UsageError_pdbcl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__import_pdbcls__import_pdbcls.try_.except_Exception_as_exc_.raise_UsageError_pdbcl", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 51, "end_line": 65, "span_ids": ["_import_pdbcls"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _import_pdbcls(modname, classname):\n try:\n __import__(modname)\n mod = sys.modules[modname]\n\n # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).\n parts = classname.split(\".\")\n pdb_cls = getattr(mod, parts[0])\n for part in parts[1:]:\n pdb_cls = getattr(pdb_cls, part)\n\n return pdb_cls\n except Exception as exc:\n value = \":\".join((modname, classname))\n raise UsageError(\"--pdbcls: could not import {!r}: {}\".format(value, exc))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_configure_pytest_configure.config__cleanup_append_fi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytest_configure_pytest_configure.config__cleanup_append_fi", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 68, "end_line": 98, "span_ids": ["pytest_configure"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n pdb_cls = config.getvalue(\"usepdb_cls\")\n if pdb_cls:\n pdb_cls = _import_pdbcls(*pdb_cls)\n else:\n pdb_cls = pdb.Pdb\n\n if config.getvalue(\"trace\"):\n config.pluginmanager.register(PdbTrace(), \"pdbtrace\")\n if config.getvalue(\"usepdb\"):\n config.pluginmanager.register(PdbInvoke(), \"pdbinvoke\")\n\n pytestPDB._saved.append(\n (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls)\n )\n pdb.set_trace = pytestPDB.set_trace\n pytestPDB._pluginmanager = config.pluginmanager\n pytestPDB._config = config\n pytestPDB._pdb_cls = pdb_cls\n\n # NOTE: not using pytest_unconfigure, since it might get called although\n # pytest_configure was not (if another plugin raises UsageError).\n def fin():\n (\n 
pdb.set_trace,\n pytestPDB._pluginmanager,\n pytestPDB._config,\n pytestPDB._pdb_cls,\n ) = pytestPDB._saved.pop()\n\n config._cleanup.append(fin)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytestPDB_pytestPDB.set_trace._pdb_set_trace_frame_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_pytestPDB_pytestPDB.set_trace._pdb_set_trace_frame_", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 101, "end_line": 220, "span_ids": ["pytestPDB._is_capturing", "pytestPDB._init_pdb.if_cls__pluginmanager_is_.PytestPdbWrapper:2", "pytestPDB", "pytestPDB._init_pdb.if_cls__pluginmanager_is_.PytestPdbWrapper", "pytestPDB.set_trace", "pytestPDB._init_pdb"], "tokens": 917}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class pytestPDB(object):\n \"\"\" Pseudo PDB that defers to the real pdb. \"\"\"\n\n _pluginmanager = None\n _config = None\n _pdb_cls = pdb.Pdb\n _saved = []\n _recursive_debug = 0\n\n @classmethod\n def _is_capturing(cls, capman):\n if capman:\n return capman.is_capturing()\n return False\n\n @classmethod\n def _init_pdb(cls, *args, **kwargs):\n \"\"\" Initialize PDB debugging, dropping any IO capturing. 
\"\"\"\n import _pytest.config\n\n if cls._pluginmanager is not None:\n capman = cls._pluginmanager.getplugin(\"capturemanager\")\n if capman:\n capman.suspend(in_=True)\n tw = _pytest.config.create_terminal_writer(cls._config)\n tw.line()\n if cls._recursive_debug == 0:\n # Handle header similar to pdb.set_trace in py37+.\n header = kwargs.pop(\"header\", None)\n if header is not None:\n tw.sep(\">\", header)\n else:\n capturing = cls._is_capturing(capman)\n if capturing:\n if capturing == \"global\":\n tw.sep(\">\", \"PDB set_trace (IO-capturing turned off)\")\n else:\n tw.sep(\n \">\",\n \"PDB set_trace (IO-capturing turned off for %s)\"\n % capturing,\n )\n else:\n tw.sep(\">\", \"PDB set_trace\")\n\n class PytestPdbWrapper(cls._pdb_cls, object):\n _pytest_capman = capman\n _continued = False\n\n def do_debug(self, arg):\n cls._recursive_debug += 1\n ret = super(PytestPdbWrapper, self).do_debug(arg)\n cls._recursive_debug -= 1\n return ret\n\n def do_continue(self, arg):\n ret = super(PytestPdbWrapper, self).do_continue(arg)\n if cls._recursive_debug == 0:\n tw = _pytest.config.create_terminal_writer(cls._config)\n tw.line()\n\n capman = self._pytest_capman\n capturing = pytestPDB._is_capturing(capman)\n if capturing:\n if capturing == \"global\":\n tw.sep(\">\", \"PDB continue (IO-capturing resumed)\")\n else:\n tw.sep(\n \">\",\n \"PDB continue (IO-capturing resumed for %s)\"\n % capturing,\n )\n capman.resume()\n else:\n tw.sep(\">\", \"PDB continue\")\n cls._pluginmanager.hook.pytest_leave_pdb(\n config=cls._config, pdb=self\n )\n self._continued = True\n return ret\n\n do_c = do_cont = do_continue\n\n def set_quit(self):\n \"\"\"Raise Exit outcome when quit command is used in pdb.\n\n This is a bit of a hack - it would be better if BdbQuit\n could be handled, but this would require to wrap the\n whole pytest run, and adjust the report etc.\n \"\"\"\n super(PytestPdbWrapper, self).set_quit()\n if cls._recursive_debug == 0:\n outcomes.exit(\"Quitting debugger\")\n\n def setup(self, f, tb):\n \"\"\"Suspend on setup().\n\n Needed after do_continue resumed, and entering another\n breakpoint again.\n \"\"\"\n ret = super(PytestPdbWrapper, self).setup(f, tb)\n if not ret and self._continued:\n # pdb.setup() returns True if the command wants to exit\n # from the interaction: do not suspend capturing then.\n if self._pytest_capman:\n self._pytest_capman.suspend_global_capture(in_=True)\n return ret\n\n _pdb = PytestPdbWrapper(**kwargs)\n cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)\n else:\n _pdb = cls._pdb_cls(**kwargs)\n return _pdb\n\n @classmethod\n def set_trace(cls, *args, **kwargs):\n \"\"\"Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.\"\"\"\n frame = sys._getframe().f_back\n _pdb = cls._init_pdb(*args, **kwargs)\n _pdb.set_trace(frame)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_PdbInvoke_PdbTrace.pytest_pyfunc_call.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py_PdbInvoke_PdbTrace.pytest_pyfunc_call.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 229, "end_line": 248, "span_ids": ["PdbInvoke", "PdbInvoke.pytest_internalerror", 
"PdbInvoke.pytest_exception_interact", "PdbTrace.pytest_pyfunc_call", "PdbTrace"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PdbInvoke(object):\n def pytest_exception_interact(self, node, call, report):\n capman = node.config.pluginmanager.getplugin(\"capturemanager\")\n if capman:\n capman.suspend_global_capture(in_=True)\n out, err = capman.read_global_capture()\n sys.stdout.write(out)\n sys.stdout.write(err)\n _enter_pdb(node, call.excinfo, report)\n\n def pytest_internalerror(self, excrepr, excinfo):\n tb = _postmortem_traceback(excinfo)\n post_mortem(tb)\n\n\nclass PdbTrace(object):\n @hookimpl(hookwrapper=True)\n def pytest_pyfunc_call(self, pyfuncitem):\n _test_pytest_function(pyfuncitem)\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__test_pytest_function__test_pytest_function.pyfuncitem._fixtureinfo.argnames.tuple_new_list_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__test_pytest_function__test_pytest_function.pyfuncitem._fixtureinfo.argnames.tuple_new_list_", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 260, "span_ids": ["_test_pytest_function"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _test_pytest_function(pyfuncitem):\n _pdb = pytestPDB._init_pdb()\n testfunction = pyfuncitem.obj\n pyfuncitem.obj = _pdb.runcall\n if \"func\" in pyfuncitem._fixtureinfo.argnames: # pragma: no branch\n raise ValueError(\"--trace can't be used with a fixture named func!\")\n pyfuncitem.funcargs[\"func\"] = testfunction\n new_list = list(pyfuncitem._fixtureinfo.argnames)\n new_list.append(\"func\")\n pyfuncitem._fixtureinfo.argnames = tuple(new_list)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__enter_pdb__enter_pdb.return.rep": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__enter_pdb__enter_pdb.return.rep", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 263, "end_line": 289, "span_ids": ["_enter_pdb"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def _enter_pdb(node, excinfo, rep):\n # XXX we re-use the TerminalReporter's terminalwriter\n # because this seems to avoid some encoding related troubles\n # for not completely clear reasons.\n tw = node.config.pluginmanager.getplugin(\"terminalreporter\")._tw\n tw.line()\n\n showcapture = node.config.option.showcapture\n\n for sectionname, content in (\n (\"stdout\", rep.capstdout),\n (\"stderr\", rep.capstderr),\n (\"log\", rep.caplog),\n ):\n if showcapture in (sectionname, \"all\") and content:\n tw.sep(\">\", \"captured \" + sectionname)\n if content[-1:] == \"\\n\":\n content = content[:-1]\n tw.line(content)\n\n tw.sep(\">\", \"traceback\")\n rep.toterminal(tw)\n tw.sep(\">\", \"entering PDB\")\n tb = _postmortem_traceback(excinfo)\n rep._pdbshown = True\n post_mortem(tb)\n return rep", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__postmortem_traceback_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/debugging.py__postmortem_traceback_", "embedding": null, "metadata": {"file_path": "src/_pytest/debugging.py", "file_name": "debugging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 292, "end_line": 321, "span_ids": ["post_mortem.Pdb.get_stack", "post_mortem", "_postmortem_traceback", "post_mortem.Pdb", "_find_last_non_hidden_frame"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _postmortem_traceback(excinfo):\n if isinstance(excinfo.value, UnexpectedException):\n # A doctest.UnexpectedException is not useful for post_mortem.\n # Use the underlying exception instead:\n return excinfo.value.exc_info[2]\n else:\n return excinfo._excinfo[2]\n\n\ndef _find_last_non_hidden_frame(stack):\n i = max(0, len(stack) - 1)\n while i and stack[i][0].f_locals.get(\"__tracebackhide__\", False):\n i -= 1\n return i\n\n\ndef post_mortem(t):\n class Pdb(pytestPDB._pdb_cls, object):\n def get_stack(self, f, t):\n stack, i = super(Pdb, self).get_stack(f, t)\n if f is None:\n i = _find_last_non_hidden_frame(stack)\n return stack, i\n\n p = Pdb()\n p.reset()\n p.interaction(None, t)\n if p.quitting:\n outcomes.exit(\"Quitting debugger\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py___PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py___PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST._", "embedding": null, "metadata": {"file_path": "src/_pytest/deprecated.py", "file_name": "deprecated.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 68, "span_ids": ["imports", "docstring", "impl"], "tokens": 732}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nThis module contains deprecation messages and bits of code used elsewhere in the codebase\nthat is planned to be removed in the next pytest release.\n\nKeeping it in a central location makes it easy to track what is deprecated and should\nbe removed when the time comes.\n\nAll constants defined in this module should be either PytestWarning instances or UnformattedWarning\nin case of warnings which need to format their messages.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom _pytest.warning_types import PytestDeprecationWarning\nfrom _pytest.warning_types import RemovedInPytest4Warning\nfrom _pytest.warning_types import UnformattedWarning\n\nYIELD_TESTS = \"yield tests were removed in pytest 4.0 - {name} will be ignored\"\n\n\nFIXTURE_FUNCTION_CALL = (\n 'Fixture \"{name}\" called directly. Fixtures are not meant to be called directly,\\n'\n \"but are created automatically when test functions request them as parameters.\\n\"\n \"See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\\n\"\n \"https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code.\"\n)\n\nFIXTURE_NAMED_REQUEST = PytestDeprecationWarning(\n \"'request' is a reserved name for fixtures and will raise an error in future versions\"\n)\n\nCFG_PYTEST_SECTION = \"[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead.\"\n\nGETFUNCARGVALUE = RemovedInPytest4Warning(\n \"getfuncargvalue is deprecated, use getfixturevalue\"\n)\n\nRAISES_MESSAGE_PARAMETER = PytestDeprecationWarning(\n \"The 'message' parameter is deprecated.\\n\"\n \"(did you mean to use `match='some regex'` to check the exception message?)\\n\"\n \"Please comment on https://github.com/pytest-dev/pytest/issues/3974 \"\n \"if you have concerns about removal of this parameter.\"\n)\n\nRESULT_LOG = PytestDeprecationWarning(\n \"--result-log is deprecated and scheduled for removal in pytest 5.0.\\n\"\n \"See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information.\"\n)\n\nRAISES_EXEC = PytestDeprecationWarning(\n \"raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\\n\\n\"\n \"See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec\"\n)\nWARNS_EXEC = PytestDeprecationWarning(\n \"warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\\n\\n\"\n \"See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec\"\n)\n\nPYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = (\n \"Defining 'pytest_plugins' in a non-top-level conftest is no longer supported \"\n \"because it affects the entire directory tree in a non-explicit way.\\n\"\n \" {}\\n\"\n \"Please move it to a top level conftest file at the rootdir:\\n\"\n \" {}\\n\"\n \"For more information, visit:\\n\"\n \" https://docs.pytest.org/en/latest/deprecations.html#pytest-plugins-in-non-top-level-conftest-files\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py_PYTEST_CONFIG_GLOBAL_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/deprecated.py_PYTEST_CONFIG_GLOBAL_", "embedding": null, "metadata": {"file_path": "src/_pytest/deprecated.py", "file_name": "deprecated.py", "file_type": "text/x-python", "category": "implementation", "start_line": 70, "end_line": 96, "span_ids": ["impl:21"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning(\n \"the `pytest.config` global is deprecated. Please use `request.config` \"\n \"or `pytest_configure` (if you're a pytest plugin) instead.\"\n)\n\nPYTEST_ENSURETEMP = RemovedInPytest4Warning(\n \"pytest/tmpdir_factory.ensuretemp is deprecated, \\n\"\n \"please use the tmp_path fixture or tmp_path_factory.mktemp\"\n)\n\nPYTEST_LOGWARNING = PytestDeprecationWarning(\n \"pytest_logwarning is deprecated, no longer being called, and will be removed soon\\n\"\n \"please use pytest_warning_captured instead\"\n)\n\nPYTEST_WARNS_UNKNOWN_KWARGS = UnformattedWarning(\n PytestDeprecationWarning,\n \"pytest.warns() got unexpected keyword arguments: {args!r}.\\n\"\n \"This will be an error in future versions.\",\n)\n\nPYTEST_PARAM_UNKNOWN_KWARGS = UnformattedWarning(\n PytestDeprecationWarning,\n \"pytest.param() got unexpected keyword arguments: {args!r}.\\n\"\n \"This will be an error in future versions.\",\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__discover_and_run_doct_RUNNER_CLASS.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__discover_and_run_doct_RUNNER_CLASS.None", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "docstring", "impl"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" discover and run doctests in modules and test files.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport platform\nimport sys\nimport traceback\nfrom contextlib import contextmanager\n\nimport pytest\nfrom _pytest._code.code import ExceptionInfo\nfrom _pytest._code.code import ReprFileLocation\nfrom _pytest._code.code import TerminalRepr\nfrom _pytest.compat import safe_getattr\nfrom _pytest.fixtures import FixtureRequest\nfrom _pytest.outcomes import Skipped\n\nDOCTEST_REPORT_CHOICE_NONE = \"none\"\nDOCTEST_REPORT_CHOICE_CDIFF = \"cdiff\"\nDOCTEST_REPORT_CHOICE_NDIFF = \"ndiff\"\nDOCTEST_REPORT_CHOICE_UDIFF = \"udiff\"\nDOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = \"only_first_failure\"\n\nDOCTEST_REPORT_CHOICES = (\n DOCTEST_REPORT_CHOICE_NONE,\n DOCTEST_REPORT_CHOICE_CDIFF,\n DOCTEST_REPORT_CHOICE_NDIFF,\n 
DOCTEST_REPORT_CHOICE_UDIFF,\n DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,\n)\n\n# Lazy definition of runner class\nRUNNER_CLASS = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_addoption_pytest_addoption.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_addoption_pytest_addoption.None_6", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 85, "span_ids": ["pytest_addoption"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n parser.addini(\n \"doctest_optionflags\",\n \"option flags for doctests\",\n type=\"args\",\n default=[\"ELLIPSIS\"],\n )\n parser.addini(\n \"doctest_encoding\", \"encoding used for doctest files\", default=\"utf-8\"\n )\n group = parser.getgroup(\"collect\")\n group.addoption(\n \"--doctest-modules\",\n action=\"store_true\",\n default=False,\n help=\"run doctests in all .py modules\",\n dest=\"doctestmodules\",\n )\n group.addoption(\n \"--doctest-report\",\n type=str.lower,\n default=\"udiff\",\n help=\"choose another output format for diffs on doctest failure\",\n choices=DOCTEST_REPORT_CHOICES,\n dest=\"doctestreport\",\n )\n group.addoption(\n \"--doctest-glob\",\n action=\"append\",\n default=[],\n metavar=\"pat\",\n help=\"doctests file matching pattern, default: test*.txt\",\n dest=\"doctestglob\",\n )\n group.addoption(\n \"--doctest-ignore-import-errors\",\n action=\"store_true\",\n default=False,\n help=\"ignore doctest ImportErrors\",\n dest=\"doctest_ignore_import_errors\",\n )\n group.addoption(\n \"--doctest-continue-on-failure\",\n action=\"store_true\",\n default=False,\n help=\"for a given doctest, continue to run after the first failure\",\n dest=\"doctest_continue_on_failure\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_collect_file_MultipleDoctestFailures.__init__.self.failures.failures": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_pytest_collect_file_MultipleDoctestFailures.__init__.self.failures.failures", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 88, "end_line": 129, "span_ids": ["ReprFailDoctest.toterminal", "_is_setup_py", "ReprFailDoctest", "_is_doctest", "MultipleDoctestFailures", "pytest_collect_file"], "tokens": 319}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"def pytest_collect_file(path, parent):\n config = parent.config\n if path.ext == \".py\":\n if config.option.doctestmodules and not _is_setup_py(config, path, parent):\n return DoctestModule(path, parent)\n elif _is_doctest(config, path, parent):\n return DoctestTextfile(path, parent)\n\n\ndef _is_setup_py(config, path, parent):\n if path.basename != \"setup.py\":\n return False\n contents = path.read()\n return \"setuptools\" in contents or \"distutils\" in contents\n\n\ndef _is_doctest(config, path, parent):\n if path.ext in (\".txt\", \".rst\") and parent.session.isinitpath(path):\n return True\n globs = config.getoption(\"doctestglob\") or [\"test*.txt\"]\n for glob in globs:\n if path.check(fnmatch=glob):\n return True\n return False\n\n\nclass ReprFailDoctest(TerminalRepr):\n def __init__(self, reprlocation_lines):\n # List of (reprlocation, lines) tuples\n self.reprlocation_lines = reprlocation_lines\n\n def toterminal(self, tw):\n for reprlocation, lines in self.reprlocation_lines:\n for line in lines:\n tw.line(line)\n reprlocation.toterminal(tw)\n\n\nclass MultipleDoctestFailures(Exception):\n def __init__(self, failures):\n super(MultipleDoctestFailures, self).__init__()\n self.failures = failures", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__init_runner_class__get_runner.return.RUNNER_CLASS_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__init_runner_class__get_runner.return.RUNNER_CLASS_", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 132, "end_line": 178, "span_ids": ["_init_runner_class.PytestDoctestRunner:2", "_get_runner", "_init_runner_class.PytestDoctestRunner", "_init_runner_class"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _init_runner_class():\n import doctest\n\n class PytestDoctestRunner(doctest.DebugRunner):\n \"\"\"\n Runner to collect failures. 
Note that the out variable in this case is\n a list instead of a stdout-like object\n \"\"\"\n\n def __init__(\n self, checker=None, verbose=None, optionflags=0, continue_on_failure=True\n ):\n doctest.DebugRunner.__init__(\n self, checker=checker, verbose=verbose, optionflags=optionflags\n )\n self.continue_on_failure = continue_on_failure\n\n def report_failure(self, out, test, example, got):\n failure = doctest.DocTestFailure(test, example, got)\n if self.continue_on_failure:\n out.append(failure)\n else:\n raise failure\n\n def report_unexpected_exception(self, out, test, example, exc_info):\n if isinstance(exc_info[1], Skipped):\n raise exc_info[1]\n failure = doctest.UnexpectedException(test, example, exc_info)\n if self.continue_on_failure:\n out.append(failure)\n else:\n raise failure\n\n return PytestDoctestRunner\n\n\ndef _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):\n # We need this in order to do a lazy import on doctest\n global RUNNER_CLASS\n if RUNNER_CLASS is None:\n RUNNER_CLASS = _init_runner_class()\n return RUNNER_CLASS(\n checker=checker,\n verbose=verbose,\n optionflags=optionflags,\n continue_on_failure=continue_on_failure,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem_DoctestItem._disable_output_capturing_for_darwin.if_capman_.sys_stderr_write_err_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem_DoctestItem._disable_output_capturing_for_darwin.if_capman_.sys_stderr_write_err_", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 181, "end_line": 218, "span_ids": ["DoctestItem", "DoctestItem._disable_output_capturing_for_darwin", "DoctestItem.runtest", "DoctestItem.setup"], "tokens": 309}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DoctestItem(pytest.Item):\n def __init__(self, name, parent, runner=None, dtest=None):\n super(DoctestItem, self).__init__(name, parent)\n self.runner = runner\n self.dtest = dtest\n self.obj = None\n self.fixture_request = None\n\n def setup(self):\n if self.dtest is not None:\n self.fixture_request = _setup_fixtures(self)\n globs = dict(getfixture=self.fixture_request.getfixturevalue)\n for name, value in self.fixture_request.getfixturevalue(\n \"doctest_namespace\"\n ).items():\n globs[name] = value\n self.dtest.globs.update(globs)\n\n def runtest(self):\n _check_all_skipped(self.dtest)\n self._disable_output_capturing_for_darwin()\n failures = []\n self.runner.run(self.dtest, out=failures)\n if failures:\n raise MultipleDoctestFailures(failures)\n\n def _disable_output_capturing_for_darwin(self):\n \"\"\"\n Disable output capturing. 
Otherwise, stdout is lost to doctest (#985)\n \"\"\"\n if platform.system() != \"Darwin\":\n return\n capman = self.config.pluginmanager.getplugin(\"capturemanager\")\n if capman:\n capman.suspend_global_capture(in_=True)\n out, err = capman.read_global_capture()\n sys.stdout.write(out)\n sys.stderr.write(err)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem.repr_failure_DoctestItem.reportinfo.return.self_fspath_self_dtest_l": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestItem.repr_failure_DoctestItem.reportinfo.return.self_fspath_self_dtest_l", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 220, "end_line": 276, "span_ids": ["DoctestItem.reportinfo", "DoctestItem.repr_failure"], "tokens": 505}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DoctestItem(pytest.Item):\n\n def repr_failure(self, excinfo):\n import doctest\n\n failures = None\n if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):\n failures = [excinfo.value]\n elif excinfo.errisinstance(MultipleDoctestFailures):\n failures = excinfo.value.failures\n\n if failures is not None:\n reprlocation_lines = []\n for failure in failures:\n example = failure.example\n test = failure.test\n filename = test.filename\n if test.lineno is None:\n lineno = None\n else:\n lineno = test.lineno + example.lineno + 1\n message = type(failure).__name__\n reprlocation = ReprFileLocation(filename, lineno, message)\n checker = _get_checker()\n report_choice = _get_report_choice(\n self.config.getoption(\"doctestreport\")\n )\n if lineno is not None:\n lines = failure.test.docstring.splitlines(False)\n # add line numbers to the left of the error message\n lines = [\n \"%03d %s\" % (i + test.lineno + 1, x)\n for (i, x) in enumerate(lines)\n ]\n # trim docstring error lines to 10\n lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]\n else:\n lines = [\n \"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example\"\n ]\n indent = \">>>\"\n for line in example.source.splitlines():\n lines.append(\"??? 
%s %s\" % (indent, line))\n indent = \"...\"\n if isinstance(failure, doctest.DocTestFailure):\n lines += checker.output_difference(\n example, failure.got, report_choice\n ).split(\"\\n\")\n else:\n inner_excinfo = ExceptionInfo(failure.exc_info)\n lines += [\"UNEXPECTED EXCEPTION: %s\" % repr(inner_excinfo.value)]\n lines += traceback.format_exception(*failure.exc_info)\n reprlocation_lines.append((reprlocation, lines))\n return ReprFailDoctest(reprlocation_lines)\n else:\n return super(DoctestItem, self).repr_failure(excinfo)\n\n def reportinfo(self):\n return self.fspath, self.dtest.lineno, \"[doctest] %s\" % self.name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_flag_lookup__get_flag_lookup.return.dict_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_flag_lookup__get_flag_lookup.return.dict_", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 279, "end_line": 291, "span_ids": ["_get_flag_lookup"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_flag_lookup():\n import doctest\n\n return dict(\n DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,\n DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,\n NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,\n ELLIPSIS=doctest.ELLIPSIS,\n IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,\n COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,\n ALLOW_UNICODE=_get_allow_unicode_flag(),\n ALLOW_BYTES=_get_allow_bytes_flag(),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_get_optionflags__get_continue_on_failure.return.continue_on_failure": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_get_optionflags__get_continue_on_failure.return.continue_on_failure", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 294, "end_line": 310, "span_ids": ["_get_continue_on_failure", "get_optionflags"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_optionflags(parent):\n optionflags_str = parent.config.getini(\"doctest_optionflags\")\n flag_lookup_table = _get_flag_lookup()\n flag_acc = 0\n for flag in optionflags_str:\n flag_acc |= flag_lookup_table[flag]\n return flag_acc\n\n\ndef _get_continue_on_failure(config):\n continue_on_failure = config.getvalue(\"doctest_continue_on_failure\")\n if continue_on_failure:\n # We need to turn 
off this if we use pdb since we should stop at\n # the first failure\n if config.getvalue(\"usepdb\"):\n continue_on_failure = False\n return continue_on_failure", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestTextfile_DoctestTextfile.collect.if_test_examples_.yield_DoctestItem_test_na": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestTextfile_DoctestTextfile.collect.if_test_examples_.yield_DoctestItem_test_na", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 313, "end_line": 340, "span_ids": ["DoctestTextfile.collect", "DoctestTextfile"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DoctestTextfile(pytest.Module):\n obj = None\n\n def collect(self):\n import doctest\n\n # inspired by doctest.testfile; ideally we would use it directly,\n # but it doesn't support passing a custom checker\n encoding = self.config.getini(\"doctest_encoding\")\n text = self.fspath.read_text(encoding)\n filename = str(self.fspath)\n name = self.fspath.basename\n globs = {\"__name__\": \"__main__\"}\n\n optionflags = get_optionflags(self)\n\n runner = _get_runner(\n verbose=0,\n optionflags=optionflags,\n checker=_get_checker(),\n continue_on_failure=_get_continue_on_failure(self.config),\n )\n _fix_spoof_python2(runner, encoding)\n\n parser = doctest.DocTestParser()\n test = parser.get_doctest(text, globs, name, filename, 0)\n if test.examples:\n yield DoctestItem(test.name, self, runner, test)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__check_all_skipped__is_mocked.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__check_all_skipped__is_mocked.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 343, "end_line": 361, "span_ids": ["_check_all_skipped", "_is_mocked"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _check_all_skipped(test):\n \"\"\"raises pytest.skip() if all examples in the given DocTest have the SKIP\n option set.\n \"\"\"\n import doctest\n\n all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)\n if all_skipped:\n pytest.skip(\"all tests skipped by +SKIP option\")\n\n\ndef _is_mocked(obj):\n \"\"\"\n returns if a object is possibly a mock object by checking the existence of a highly improbable attribute\n \"\"\"\n return (\n 
safe_getattr(obj, \"pytest_mock_example_attribute_that_shouldnt_exist\", None)\n is not None\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__patch_unwrap_mock_aware__patch_unwrap_mock_aware.if_real_unwrap_is_None_.else_.try_.finally_.inspect.unwrap.real_unwrap": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__patch_unwrap_mock_aware__patch_unwrap_mock_aware.if_real_unwrap_is_None_.else_.try_.finally_.inspect.unwrap.real_unwrap", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 385, "span_ids": ["_patch_unwrap_mock_aware"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef _patch_unwrap_mock_aware():\n \"\"\"\n contextmanager which replaces ``inspect.unwrap`` with a version\n that's aware of mock objects and doesn't recurse on them\n \"\"\"\n real_unwrap = getattr(inspect, \"unwrap\", None)\n if real_unwrap is None:\n yield\n else:\n\n def _mock_aware_unwrap(obj, stop=None):\n if stop is None:\n return real_unwrap(obj, stop=_is_mocked)\n else:\n return real_unwrap(obj, stop=lambda obj: _is_mocked(obj) or stop(obj))\n\n inspect.unwrap = _mock_aware_unwrap\n try:\n yield\n finally:\n inspect.unwrap = real_unwrap", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule_DoctestModule.collect.MockAwareDocTestFinder._find.with__patch_unwrap_mock_a.doctest_DocTestFinder__fi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule_DoctestModule.collect.MockAwareDocTestFinder._find.with__patch_unwrap_mock_a.doctest_DocTestFinder__fi", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 388, "end_line": 407, "span_ids": ["DoctestModule.collect", "DoctestModule.collect.MockAwareDocTestFinder", "DoctestModule.collect.MockAwareDocTestFinder:2", "DoctestModule"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DoctestModule(pytest.Module):\n def collect(self):\n import doctest\n\n class MockAwareDocTestFinder(doctest.DocTestFinder):\n \"\"\"\n a hackish doctest finder that overrides stdlib internals to fix a stdlib bug\n\n https://github.com/pytest-dev/pytest/issues/3456\n https://bugs.python.org/issue25532\n \"\"\"\n\n def _find(self, tests, obj, name, module, source_lines, globs, seen):\n if _is_mocked(obj):\n return\n with _patch_unwrap_mock_aware():\n\n 
doctest.DocTestFinder._find(\n self, tests, obj, name, module, source_lines, globs, seen\n )\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule.collect.if_self_fspath_basename__DoctestModule.collect.for_test_in_finder_find_m.if_test_examples_skip.yield_DoctestItem_test_na": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py_DoctestModule.collect.if_self_fspath_basename__DoctestModule.collect.for_test_in_finder_find_m.if_test_examples_skip.yield_DoctestItem_test_na", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 409, "end_line": 431, "span_ids": ["DoctestModule.collect.MockAwareDocTestFinder:2"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DoctestModule(pytest.Module):\n def collect(self):\n # ... other code\n\n if self.fspath.basename == \"conftest.py\":\n module = self.config.pluginmanager._importconftest(self.fspath)\n else:\n try:\n module = self.fspath.pyimport()\n except ImportError:\n if self.config.getvalue(\"doctest_ignore_import_errors\"):\n pytest.skip(\"unable to import module %r\" % self.fspath)\n else:\n raise\n # uses internal doctest module parsing mechanism\n finder = MockAwareDocTestFinder()\n optionflags = get_optionflags(self)\n runner = _get_runner(\n verbose=0,\n optionflags=optionflags,\n checker=_get_checker(),\n continue_on_failure=_get_continue_on_failure(self.config),\n )\n\n for test in finder.find(module, module.__name__):\n if test.examples: # skip empty doctests\n yield DoctestItem(test.name, self, runner, test)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__setup_fixtures__setup_fixtures.return.fixture_request": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__setup_fixtures__setup_fixtures.return.fixture_request", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 434, "end_line": 449, "span_ids": ["_setup_fixtures"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _setup_fixtures(doctest_item):\n \"\"\"\n Used by DoctestTextfile and DoctestItem to setup fixture information.\n \"\"\"\n\n def func():\n pass\n\n doctest_item.funcargs = {}\n fm = doctest_item.session._fixturemanager\n doctest_item._fixtureinfo = fm.getfixtureinfo(\n node=doctest_item, func=func, cls=None, funcargs=False\n )\n fixture_request = 
FixtureRequest(doctest_item)\n fixture_request._fillfixtures()\n return fixture_request", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker__get_checker.re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker__get_checker.re", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 452, "end_line": 466, "span_ids": ["_get_checker"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_checker():\n \"\"\"\n Returns a doctest.OutputChecker subclass that takes in account the\n ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES\n to strip b'' prefixes.\n Useful when the same doctest should run in Python 2 and Python 3.\n\n An inner class is used to avoid importing \"doctest\" at the module\n level.\n \"\"\"\n if hasattr(_get_checker, \"LiteralsOutputChecker\"):\n return _get_checker.LiteralsOutputChecker()\n\n import doctest\n import re\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker.LiteralsOutputChecker__get_checker.return._get_checker_LiteralsOutp": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_checker.LiteralsOutputChecker__get_checker.return._get_checker_LiteralsOutp", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 504, "span_ids": ["_get_checker.LiteralsOutputChecker", "_get_checker.LiteralsOutputChecker:2"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_checker():\n # ... 
other code\n\n class LiteralsOutputChecker(doctest.OutputChecker):\n \"\"\"\n Copied from doctest_nose_plugin.py from the nltk project:\n https://github.com/nltk/nltk\n\n Further extended to also support byte literals.\n \"\"\"\n\n _unicode_literal_re = re.compile(r\"(\\W|^)[uU]([rR]?[\\'\\\"])\", re.UNICODE)\n _bytes_literal_re = re.compile(r\"(\\W|^)[bB]([rR]?[\\'\\\"])\", re.UNICODE)\n\n def check_output(self, want, got, optionflags):\n res = doctest.OutputChecker.check_output(self, want, got, optionflags)\n if res:\n return True\n\n allow_unicode = optionflags & _get_allow_unicode_flag()\n allow_bytes = optionflags & _get_allow_bytes_flag()\n if not allow_unicode and not allow_bytes:\n return False\n\n else: # pragma: no cover\n\n def remove_prefixes(regex, txt):\n return re.sub(regex, r\"\\1\\2\", txt)\n\n if allow_unicode:\n want = remove_prefixes(self._unicode_literal_re, want)\n got = remove_prefixes(self._unicode_literal_re, got)\n if allow_bytes:\n want = remove_prefixes(self._bytes_literal_re, want)\n got = remove_prefixes(self._bytes_literal_re, got)\n res = doctest.OutputChecker.check_output(self, want, got, optionflags)\n return res\n\n _get_checker.LiteralsOutputChecker = LiteralsOutputChecker\n return _get_checker.LiteralsOutputChecker()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_allow_unicode_flag__get_report_choice.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__get_allow_unicode_flag__get_report_choice.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 507, "end_line": 538, "span_ids": ["_get_report_choice", "_get_allow_unicode_flag", "_get_allow_bytes_flag"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_allow_unicode_flag():\n \"\"\"\n Registers and returns the ALLOW_UNICODE flag.\n \"\"\"\n import doctest\n\n return doctest.register_optionflag(\"ALLOW_UNICODE\")\n\n\ndef _get_allow_bytes_flag():\n \"\"\"\n Registers and returns the ALLOW_BYTES flag.\n \"\"\"\n import doctest\n\n return doctest.register_optionflag(\"ALLOW_BYTES\")\n\n\ndef _get_report_choice(key):\n \"\"\"\n This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid\n importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.\n \"\"\"\n import doctest\n\n return {\n DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,\n DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,\n DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,\n DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,\n DOCTEST_REPORT_CHOICE_NONE: 0,\n }[key]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__fix_spoof_python2_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/doctest.py__fix_spoof_python2_", "embedding": null, "metadata": {"file_path": "src/_pytest/doctest.py", "file_name": "doctest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 541, "end_line": 573, "span_ids": ["_fix_spoof_python2.UnicodeSpoof", "doctest_namespace", "_fix_spoof_python2.UnicodeSpoof.getvalue", "_fix_spoof_python2"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _fix_spoof_python2(runner, encoding):\n \"\"\"\n Installs a \"SpoofOut\" into the given DebugRunner so it properly deals with unicode output. This\n should patch only doctests for text files because they don't have a way to declare their\n encoding. Doctests in docstrings from Python modules don't have the same problem given that\n Python already decoded the strings.\n\n This fixes the problem related in issue #2434.\n \"\"\"\n from _pytest.compat import _PY2\n\n if not _PY2:\n return\n\n from doctest import _SpoofOut\n\n class UnicodeSpoof(_SpoofOut):\n def getvalue(self):\n result = _SpoofOut.getvalue(self)\n if encoding and isinstance(result, bytes):\n result = result.decode(encoding)\n return result\n\n runner._fakeout = UnicodeSpoof()\n\n\n@pytest.fixture(scope=\"session\")\ndef doctest_namespace():\n \"\"\"\n Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.\n \"\"\"\n return dict()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_from___future___import_ab_get_scope_node.return.node_getparent_cls_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_from___future___import_ab_get_scope_node.return.node_getparent_cls_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 110, "span_ids": ["impl", "scopeproperty", "pytest_sessionstart", "get_scope_package", "PseudoFixtureDef", "imports", "get_scope_node"], "tokens": 697}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport inspect\nimport itertools\nimport sys\nimport warnings\nfrom collections import defaultdict\nfrom collections import deque\nfrom collections import OrderedDict\n\nimport attr\nimport py\nimport six\n\nimport _pytest\nfrom _pytest import nodes\nfrom _pytest._code.code import FormattedExcinfo\nfrom _pytest._code.code import TerminalRepr\nfrom _pytest.compat import _format_args\nfrom _pytest.compat import _PytestWrapper\nfrom _pytest.compat import exc_clear\nfrom _pytest.compat import FuncargnamesCompatAttr\nfrom _pytest.compat import 
get_real_func\nfrom _pytest.compat import get_real_method\nfrom _pytest.compat import getfslineno\nfrom _pytest.compat import getfuncargnames\nfrom _pytest.compat import getimfunc\nfrom _pytest.compat import getlocation\nfrom _pytest.compat import is_generator\nfrom _pytest.compat import isclass\nfrom _pytest.compat import NOTSET\nfrom _pytest.compat import safe_getattr\nfrom _pytest.deprecated import FIXTURE_FUNCTION_CALL\nfrom _pytest.deprecated import FIXTURE_NAMED_REQUEST\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import TEST_OUTCOME\n\n\n@attr.s(frozen=True)\nclass PseudoFixtureDef(object):\n cached_result = attr.ib()\n scope = attr.ib()\n\n\ndef pytest_sessionstart(session):\n import _pytest.python\n import _pytest.nodes\n\n scopename2class.update(\n {\n \"package\": _pytest.python.Package,\n \"class\": _pytest.python.Class,\n \"module\": _pytest.python.Module,\n \"function\": _pytest.nodes.Item,\n \"session\": _pytest.main.Session,\n }\n )\n session._fixturemanager = FixtureManager(session)\n\n\nscopename2class = {}\n\n\nscope2props = dict(session=())\nscope2props[\"package\"] = (\"fspath\",)\nscope2props[\"module\"] = (\"fspath\", \"module\")\nscope2props[\"class\"] = scope2props[\"module\"] + (\"cls\",)\nscope2props[\"instance\"] = scope2props[\"class\"] + (\"instance\",)\nscope2props[\"function\"] = scope2props[\"instance\"] + (\"function\", \"keywords\")\n\n\ndef scopeproperty(name=None, doc=None):\n def decoratescope(func):\n scopename = name or func.__name__\n\n def provide(self):\n if func.__name__ in scope2props[self.scope]:\n return func(self)\n raise AttributeError(\n \"%s not available in %s-scoped context\" % (scopename, self.scope)\n )\n\n return property(provide, None, None, func.__doc__)\n\n return decoratescope\n\n\ndef get_scope_package(node, fixturedef):\n import pytest\n\n cls = pytest.Package\n current = node\n fixture_package_name = \"%s/%s\" % (fixturedef.baseid, \"__init__.py\")\n while current and (\n type(current) is not cls or fixture_package_name != current.nodeid\n ):\n current = current.parent\n if current is None:\n return node.session\n return current\n\n\ndef get_scope_node(node, scope):\n cls = scopename2class.get(scope)\n if cls is None:\n raise ValueError(\"unknown scope\")\n return node.getparent(cls)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_add_funcarg_pseudo_fixture_def_add_funcarg_pseudo_fixture_def.for_argname_valuelist_in.if_node_and_argname_in_no.else_.if_node_is_not_None_.node__name2pseudofixtured": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_add_funcarg_pseudo_fixture_def_add_funcarg_pseudo_fixture_def.for_argname_valuelist_in.if_node_and_argname_in_no.else_.if_node_is_not_None_.node__name2pseudofixtured", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 168, "span_ids": ["add_funcarg_pseudo_fixture_def"], "tokens": 607}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):\n # this function will transform all collected calls to a functions\n # if they use direct funcargs (i.e. direct parametrization)\n # because we want later test execution to be able to rely on\n # an existing FixtureDef structure for all arguments.\n # XXX we can probably avoid this algorithm if we modify CallSpec2\n # to directly care for creating the fixturedefs within its methods.\n if not metafunc._calls[0].funcargs:\n return # this function call does not have direct parametrization\n # collect funcargs of all callspecs into a list of values\n arg2params = {}\n arg2scope = {}\n for callspec in metafunc._calls:\n for argname, argvalue in callspec.funcargs.items():\n assert argname not in callspec.params\n callspec.params[argname] = argvalue\n arg2params_list = arg2params.setdefault(argname, [])\n callspec.indices[argname] = len(arg2params_list)\n arg2params_list.append(argvalue)\n if argname not in arg2scope:\n scopenum = callspec._arg2scopenum.get(argname, scopenum_function)\n arg2scope[argname] = scopes[scopenum]\n callspec.funcargs.clear()\n\n # register artificial FixtureDef's so that later at test execution\n # time we can rely on a proper FixtureDef to exist for fixture setup.\n arg2fixturedefs = metafunc._arg2fixturedefs\n for argname, valuelist in arg2params.items():\n # if we have a scope that is higher than function we need\n # to make sure we only ever create an according fixturedef on\n # a per-scope basis. We thus store and cache the fixturedef on the\n # node related to the scope.\n scope = arg2scope[argname]\n node = None\n if scope != \"function\":\n node = get_scope_node(collector, scope)\n if node is None:\n assert scope == \"class\" and isinstance(collector, _pytest.python.Module)\n # use module-level collector for class-scope (for now)\n node = collector\n if node and argname in node._name2pseudofixturedef:\n arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]\n else:\n fixturedef = FixtureDef(\n fixturemanager,\n \"\",\n argname,\n get_direct_param_fixture_func,\n arg2scope[argname],\n valuelist,\n False,\n False,\n )\n arg2fixturedefs[argname] = [fixturedef]\n if node is not None:\n node._name2pseudofixturedef[argname] = fixturedef", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_getfixturemarker_get_parametrized_fixture_keys.try_.else_.for_argname_param_index_.yield_key": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_getfixturemarker_get_parametrized_fixture_keys.try_.else_.for_argname_param_index_.yield_key", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 171, "end_line": 205, "span_ids": ["getfixturemarker", "get_parametrized_fixture_keys"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getfixturemarker(obj):\n \"\"\" return fixturemarker or None if it doesn't exist or raised\n exceptions.\"\"\"\n try:\n 
return getattr(obj, \"_pytestfixturefunction\", None)\n except TEST_OUTCOME:\n # some objects raise errors like request (from flask import request)\n # we don't expect them to be fixture functions\n return None\n\n\ndef get_parametrized_fixture_keys(item, scopenum):\n \"\"\" return list of keys for all parametrized arguments which match\n the specified scope. \"\"\"\n assert scopenum < scopenum_function # function\n try:\n cs = item.callspec\n except AttributeError:\n pass\n else:\n # cs.indices.items() is random order of argnames. Need to\n # sort this so that different calls to\n # get_parametrized_fixture_keys will be deterministic.\n for argname, param_index in sorted(cs.indices.items()):\n if cs._arg2scopenum[argname] != scopenum:\n continue\n if scopenum == 0: # session\n key = (argname, param_index)\n elif scopenum == 1: # package\n key = (argname, param_index, item.fspath.dirpath())\n elif scopenum == 2: # module\n key = (argname, param_index, item.fspath)\n elif scopenum == 3: # class\n key = (argname, param_index, item.fspath, item.cls)\n yield key", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__algorithm_for_sorting_o_fix_cache_order.for_scopenum_in_range_0_.for_key_in_argkeys_cache_.items_by_argkey_scopenum_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__algorithm_for_sorting_o_fix_cache_order.for_scopenum_in_range_0_.for_key_in_argkeys_cache_.items_by_argkey_scopenum_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 208, "end_line": 233, "span_ids": ["fix_cache_order", "reorder_items", "get_parametrized_fixture_keys"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# algorithm for sorting on a per-parametrized resource setup basis\n# it is called for scopenum==0 (session) first and performs sorting\n# down to the lower scopes such as to minimize number of \"high scope\"\n# setups and teardowns\n\n\ndef reorder_items(items):\n argkeys_cache = {}\n items_by_argkey = {}\n for scopenum in range(0, scopenum_function):\n argkeys_cache[scopenum] = d = {}\n items_by_argkey[scopenum] = item_d = defaultdict(deque)\n for item in items:\n keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))\n if keys:\n d[item] = keys\n for key in keys:\n item_d[key].append(item)\n items = OrderedDict.fromkeys(items)\n return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))\n\n\ndef fix_cache_order(item, argkeys_cache, items_by_argkey):\n for scopenum in range(0, scopenum_function):\n for key in argkeys_cache[scopenum].get(item, []):\n items_by_argkey[scopenum][key].appendleft(item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_reorder_items_atscope_reorder_items_atscope.return.items_done": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_reorder_items_atscope_reorder_items_atscope.return.items_done", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 236, "end_line": 273, "span_ids": ["reorder_items_atscope"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):\n if scopenum >= scopenum_function or len(items) < 3:\n return items\n ignore = set()\n items_deque = deque(items)\n items_done = OrderedDict()\n scoped_items_by_argkey = items_by_argkey[scopenum]\n scoped_argkeys_cache = argkeys_cache[scopenum]\n while items_deque:\n no_argkey_group = OrderedDict()\n slicing_argkey = None\n while items_deque:\n item = items_deque.popleft()\n if item in items_done or item in no_argkey_group:\n continue\n argkeys = OrderedDict.fromkeys(\n k for k in scoped_argkeys_cache.get(item, []) if k not in ignore\n )\n if not argkeys:\n no_argkey_group[item] = None\n else:\n slicing_argkey, _ = argkeys.popitem()\n # we don't have to remove relevant items from later in the deque because they'll just be ignored\n matching_items = [\n i for i in scoped_items_by_argkey[slicing_argkey] if i in items\n ]\n for i in reversed(matching_items):\n fix_cache_order(i, argkeys_cache, items_by_argkey)\n items_deque.appendleft(i)\n break\n if no_argkey_group:\n no_argkey_group = reorder_items_atscope(\n no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1\n )\n for item in no_argkey_group:\n items_done[item] = None\n ignore.add(slicing_argkey)\n return items_done", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fillfixtures_get_direct_param_fixture_func.return.request_param": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fillfixtures_get_direct_param_fixture_func.return.request_param", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 276, "end_line": 299, "span_ids": ["get_direct_param_fixture_func", "fillfixtures"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fillfixtures(function):\n \"\"\" fill missing funcargs for a test function. \"\"\"\n try:\n request = function._request\n except AttributeError:\n # XXX this special code path is only expected to execute\n # with the oejskit plugin. 
It uses classes with funcargs\n # and we thus have to work a bit to allow this.\n fm = function.session._fixturemanager\n fi = fm.getfixtureinfo(function.parent, function.obj, None)\n function._fixtureinfo = fi\n request = function._request = FixtureRequest(function)\n request._fillfixtures()\n # prune out funcargs for jstests\n newfuncargs = {}\n for name in fi.argnames:\n newfuncargs[name] = function.funcargs[name]\n function.funcargs = newfuncargs\n else:\n request._fillfixtures()\n\n\ndef get_direct_param_fixture_func(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FuncFixtureInfo_FuncFixtureInfo.prune_dependency_tree.self_names_closure_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FuncFixtureInfo_FuncFixtureInfo.prune_dependency_tree.self_names_closure_s", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 302, "end_line": 338, "span_ids": ["FuncFixtureInfo.prune_dependency_tree", "FuncFixtureInfo"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(slots=True)\nclass FuncFixtureInfo(object):\n # original function argument names\n argnames = attr.ib(type=tuple)\n # argnames that function immediately requires. These include argnames +\n # fixture names specified via usefixtures and via autouse=True in fixture\n # definitions.\n initialnames = attr.ib(type=tuple)\n names_closure = attr.ib() # List[str]\n name2fixturedefs = attr.ib() # List[str, List[FixtureDef]]\n\n def prune_dependency_tree(self):\n \"\"\"Recompute names_closure from initialnames and name2fixturedefs\n\n Can only reduce names_closure, which means that the new closure will\n always be a subset of the old one. The order is preserved.\n\n This method is needed because direct parametrization may shadow some\n of the fixtures that were included in the originally built dependency\n tree. In this way the dependency tree can get pruned, and the closure\n of argnames may get reduced.\n \"\"\"\n closure = set()\n working_set = set(self.initialnames)\n while working_set:\n argname = working_set.pop()\n # argname may be smth not included in the original names_closure,\n # in which case we ignore it. 
This currently happens with pseudo\n # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.\n # So they introduce the new dependency 'request' which might have\n # been missing in the original tree (closure).\n if argname not in closure and argname in self.names_closure:\n closure.add(argname)\n if argname in self.name2fixturedefs:\n working_set.update(self.name2fixturedefs[argname][-1].argnames)\n\n self.names_closure[:] = sorted(closure, key=self.names_closure.index)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest_FixtureRequest.node.return.self__getscopeitem_self_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest_FixtureRequest.node.return.self__getscopeitem_self_s", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 341, "end_line": 371, "span_ids": ["FixtureRequest", "FixtureRequest.node", "FixtureRequest.fixturenames"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n \"\"\" A request for a fixture from a test or fixture function.\n\n A request object gives access to the requesting test context\n and has an optional ``param`` attribute in case\n the fixture is parametrized indirectly.\n \"\"\"\n\n def __init__(self, pyfuncitem):\n self._pyfuncitem = pyfuncitem\n #: fixture for which this request is being performed\n self.fixturename = None\n #: Scope string, one of \"function\", \"class\", \"module\", \"session\"\n self.scope = \"function\"\n self._fixture_defs = {} # argname -> FixtureDef\n fixtureinfo = pyfuncitem._fixtureinfo\n self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()\n self._arg2index = {}\n self._fixturemanager = pyfuncitem.session._fixturemanager\n\n @property\n def fixturenames(self):\n \"\"\"names of all active fixtures in this request\"\"\"\n result = list(self._pyfuncitem._fixtureinfo.names_closure)\n result.extend(set(self._fixture_defs).difference(result))\n return result\n\n @property\n def node(self):\n \"\"\" underlying collection node (depends on current request scope)\"\"\"\n return self._getscopeitem(self.scope)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._getnextfixturedef_FixtureRequest._getnextfixturedef.return.fixturedefs_index_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._getnextfixturedef_FixtureRequest._getnextfixturedef.return.fixturedefs_index_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 373, "end_line": 387, "span_ids": ["FixtureRequest._getnextfixturedef"], "tokens": 198}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n\n def _getnextfixturedef(self, argname):\n fixturedefs = self._arg2fixturedefs.get(argname, None)\n if fixturedefs is None:\n # we arrive here because of a dynamic call to\n # getfixturevalue(argname) usage which was naturally\n # not known at parsing/collection time\n parentid = self._pyfuncitem.parent.nodeid\n fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)\n self._arg2fixturedefs[argname] = fixturedefs\n # fixturedefs list is immutable so we maintain a decreasing index\n index = self._arg2index.get(argname, 0) - 1\n if fixturedefs is None or (-index > len(fixturedefs)):\n raise FixtureLookupError(argname, self)\n self._arg2index[argname] = index\n return fixturedefs[index]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest.config_FixtureRequest.getfuncargvalue.return.self_getfixturevalue_argn": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest.config_FixtureRequest.getfuncargvalue.return.self_getfixturevalue_argn", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 389, "end_line": 485, "span_ids": ["FixtureRequest.module", "FixtureRequest.keywords", "FixtureRequest.cls", "FixtureRequest.fspath", "FixtureRequest.config", "FixtureRequest._fillfixtures", "FixtureRequest.getfuncargvalue", "FixtureRequest.instance", "FixtureRequest._addfinalizer", "FixtureRequest.addfinalizer", "FixtureRequest.applymarker", "FixtureRequest.getfixturevalue", "FixtureRequest.function", "FixtureRequest.session", "FixtureRequest.raiseerror"], "tokens": 779}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n\n @property\n def config(self):\n \"\"\" the pytest config object associated with this request. \"\"\"\n return self._pyfuncitem.config\n\n @scopeproperty()\n def function(self):\n \"\"\" test function object if the request has a per-function scope. \"\"\"\n return self._pyfuncitem.obj\n\n @scopeproperty(\"class\")\n def cls(self):\n \"\"\" class (can be None) where the test function was collected. \"\"\"\n clscol = self._pyfuncitem.getparent(_pytest.python.Class)\n if clscol:\n return clscol.obj\n\n @property\n def instance(self):\n \"\"\" instance (can be None) on which test function was collected. 
\"\"\"\n # unittest support hack, see _pytest.unittest.TestCaseFunction\n try:\n return self._pyfuncitem._testcase\n except AttributeError:\n function = getattr(self, \"function\", None)\n return getattr(function, \"__self__\", None)\n\n @scopeproperty()\n def module(self):\n \"\"\" python module object where the test function was collected. \"\"\"\n return self._pyfuncitem.getparent(_pytest.python.Module).obj\n\n @scopeproperty()\n def fspath(self):\n \"\"\" the file system path of the test module which collected this test. \"\"\"\n return self._pyfuncitem.fspath\n\n @property\n def keywords(self):\n \"\"\" keywords/markers dictionary for the underlying node. \"\"\"\n return self.node.keywords\n\n @property\n def session(self):\n \"\"\" pytest session object. \"\"\"\n return self._pyfuncitem.session\n\n def addfinalizer(self, finalizer):\n \"\"\" add finalizer/teardown function to be called after the\n last test within the requesting test context finished\n execution. \"\"\"\n # XXX usually this method is shadowed by fixturedef specific ones\n self._addfinalizer(finalizer, scope=self.scope)\n\n def _addfinalizer(self, finalizer, scope):\n colitem = self._getscopeitem(scope)\n self._pyfuncitem.session._setupstate.addfinalizer(\n finalizer=finalizer, colitem=colitem\n )\n\n def applymarker(self, marker):\n \"\"\" Apply a marker to a single test function invocation.\n This method is useful if you don't want to have a keyword/marker\n on all function invocations.\n\n :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object\n created by a call to ``pytest.mark.NAME(...)``.\n \"\"\"\n self.node.add_marker(marker)\n\n def raiseerror(self, msg):\n \"\"\" raise a FixtureLookupError with the given message. \"\"\"\n raise self._fixturemanager.FixtureLookupError(None, self, msg)\n\n def _fillfixtures(self):\n item = self._pyfuncitem\n fixturenames = getattr(item, \"fixturenames\", self.fixturenames)\n for argname in fixturenames:\n if argname not in item.funcargs:\n item.funcargs[argname] = self.getfixturevalue(argname)\n\n def getfixturevalue(self, argname):\n \"\"\" Dynamically run a named fixture function.\n\n Declaring fixtures via function argument is recommended where possible.\n But if you can only decide whether to use another fixture at test\n setup time, you may use this function to retrieve it inside a fixture\n or test function body.\n \"\"\"\n return self._get_active_fixturedef(argname).cached_result[0]\n\n def getfuncargvalue(self, argname):\n \"\"\" Deprecated, use getfixturevalue. 
\"\"\"\n from _pytest import deprecated\n\n warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2)\n return self.getfixturevalue(argname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._get_active_fixturedef_FixtureRequest._get_fixturestack.while_1_.current.current__parent_request": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._get_active_fixturedef_FixtureRequest._get_fixturestack.while_1_.current.current__parent_request", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 487, "end_line": 514, "span_ids": ["FixtureRequest._get_active_fixturedef", "FixtureRequest._get_fixturestack"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n\n def _get_active_fixturedef(self, argname):\n try:\n return self._fixture_defs[argname]\n except KeyError:\n try:\n fixturedef = self._getnextfixturedef(argname)\n except FixtureLookupError:\n if argname == \"request\":\n cached_result = (self, [0], None)\n scope = \"function\"\n return PseudoFixtureDef(cached_result, scope)\n raise\n # remove indent to prevent the python3 exception\n # from leaking into the call\n self._compute_fixture_value(fixturedef)\n self._fixture_defs[argname] = fixturedef\n return fixturedef\n\n def _get_fixturestack(self):\n current = self\n values = []\n while 1:\n fixturedef = getattr(current, \"_fixturedef\", None)\n if fixturedef is None:\n values.reverse()\n return values\n values.append(fixturedef)\n current = current._parent_request", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._compute_fixture_value_FixtureRequest._compute_fixture_value.None_1.finally_.self__schedule_finalizers": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._compute_fixture_value_FixtureRequest._compute_fixture_value.None_1.finally_.self__schedule_finalizers", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 588, "span_ids": ["FixtureRequest._compute_fixture_value"], "tokens": 651}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n\n def _compute_fixture_value(self, fixturedef):\n \"\"\"\n Creates a SubRequest based on \"self\" and calls the execute method of the given fixturedef object. 
This will\n force the FixtureDef object to throw away any previous results and compute a new fixture value, which\n will be stored into the FixtureDef object itself.\n\n :param FixtureDef fixturedef:\n \"\"\"\n # prepare a subrequest object before calling fixture function\n # (latter managed by fixturedef)\n argname = fixturedef.argname\n funcitem = self._pyfuncitem\n scope = fixturedef.scope\n try:\n param = funcitem.callspec.getparam(argname)\n except (AttributeError, ValueError):\n param = NOTSET\n param_index = 0\n has_params = fixturedef.params is not None\n fixtures_not_supported = getattr(funcitem, \"nofuncargs\", False)\n if has_params and fixtures_not_supported:\n msg = (\n \"{name} does not support fixtures, maybe unittest.TestCase subclass?\\n\"\n \"Node id: {nodeid}\\n\"\n \"Function type: {typename}\"\n ).format(\n name=funcitem.name,\n nodeid=funcitem.nodeid,\n typename=type(funcitem).__name__,\n )\n fail(msg, pytrace=False)\n if has_params:\n frame = inspect.stack()[3]\n frameinfo = inspect.getframeinfo(frame[0])\n source_path = frameinfo.filename\n source_lineno = frameinfo.lineno\n source_path = py.path.local(source_path)\n if source_path.relto(funcitem.config.rootdir):\n source_path = source_path.relto(funcitem.config.rootdir)\n msg = (\n \"The requested fixture has no parameter defined for test:\\n\"\n \" {}\\n\\n\"\n \"Requested fixture '{}' defined in:\\n{}\"\n \"\\n\\nRequested here:\\n{}:{}\".format(\n funcitem.nodeid,\n fixturedef.argname,\n getlocation(fixturedef.func, funcitem.config.rootdir),\n source_path,\n source_lineno,\n )\n )\n fail(msg, pytrace=False)\n else:\n param_index = funcitem.callspec.indices[argname]\n # if a parametrize invocation set a scope it will override\n # the static scope defined with the fixture function\n paramscopenum = funcitem.callspec._arg2scopenum.get(argname)\n if paramscopenum is not None:\n scope = scopes[paramscopenum]\n\n subrequest = SubRequest(self, scope, param, param_index, fixturedef)\n\n # check if a higher-level scoped fixture accesses a lower level one\n subrequest._check_scope(argname, self.scope, scope)\n\n # clear sys.exc_info before invoking the fixture (python bug?)\n # if it's not explicitly cleared it will leak into the call\n exc_clear()\n try:\n # call the fixture function\n fixturedef.execute(request=subrequest)\n finally:\n self._schedule_finalizers(fixturedef, subrequest)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._schedule_finalizers_FixtureRequest._check_scope.if_scopemismatch_invoking.fail_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._schedule_finalizers_FixtureRequest._check_scope.if_scopemismatch_invoking.fail_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 590, "end_line": 608, "span_ids": ["FixtureRequest._check_scope", "FixtureRequest._schedule_finalizers"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"class FixtureRequest(FuncargnamesCompatAttr):\n\n def _schedule_finalizers(self, fixturedef, subrequest):\n # if fixture function failed it might have registered finalizers\n self.session._setupstate.addfinalizer(\n functools.partial(fixturedef.finish, request=subrequest), subrequest.node\n )\n\n def _check_scope(self, argname, invoking_scope, requested_scope):\n if argname == \"request\":\n return\n if scopemismatch(invoking_scope, requested_scope):\n # try to report something helpful\n lines = self._factorytraceback()\n fail(\n \"ScopeMismatch: You tried to access the %r scoped \"\n \"fixture %r with a %r scoped request object, \"\n \"involved factories\\n%s\"\n % ((requested_scope, argname, invoking_scope, \"\\n\".join(lines))),\n pytrace=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._factorytraceback_FixtureRequest.__repr__.return._FixtureRequest_for_r_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureRequest._factorytraceback_FixtureRequest.__repr__.return._FixtureRequest_for_r_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 610, "end_line": 637, "span_ids": ["FixtureRequest.__repr__", "FixtureRequest._getscopeitem", "FixtureRequest._factorytraceback"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureRequest(FuncargnamesCompatAttr):\n\n def _factorytraceback(self):\n lines = []\n for fixturedef in self._get_fixturestack():\n factory = fixturedef.func\n fs, lineno = getfslineno(factory)\n p = self._pyfuncitem.session.fspath.bestrelpath(fs)\n args = _format_args(factory)\n lines.append(\"%s:%d: def %s%s\" % (p, lineno + 1, factory.__name__, args))\n return lines\n\n def _getscopeitem(self, scope):\n if scope == \"function\":\n # this might also be a non-function Item despite its attribute name\n return self._pyfuncitem\n if scope == \"package\":\n node = get_scope_package(self._pyfuncitem, self._fixturedef)\n else:\n node = get_scope_node(self._pyfuncitem, scope)\n if node is None and scope == \"class\":\n # fallback to function item itself\n node = self._pyfuncitem\n assert node, 'Could not obtain a node for scope \"{}\" for function {!r}'.format(\n scope, self._pyfuncitem\n )\n return node\n\n def __repr__(self):\n return \"\" % (self.node)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_SubRequest_SubRequest._schedule_finalizers.super_SubRequest_self__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_SubRequest_SubRequest._schedule_finalizers.super_SubRequest_self__", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 640, 
"end_line": 672, "span_ids": ["SubRequest._schedule_finalizers", "SubRequest.addfinalizer", "SubRequest", "SubRequest.__repr__"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SubRequest(FixtureRequest):\n \"\"\" a sub request for handling getting a fixture from a\n test function/fixture. \"\"\"\n\n def __init__(self, request, scope, param, param_index, fixturedef):\n self._parent_request = request\n self.fixturename = fixturedef.argname\n if param is not NOTSET:\n self.param = param\n self.param_index = param_index\n self.scope = scope\n self._fixturedef = fixturedef\n self._pyfuncitem = request._pyfuncitem\n self._fixture_defs = request._fixture_defs\n self._arg2fixturedefs = request._arg2fixturedefs\n self._arg2index = request._arg2index\n self._fixturemanager = request._fixturemanager\n\n def __repr__(self):\n return \"\" % (self.fixturename, self._pyfuncitem)\n\n def addfinalizer(self, finalizer):\n self._fixturedef.addfinalizer(finalizer)\n\n def _schedule_finalizers(self, fixturedef, subrequest):\n # if the executing fixturedef was not explicitly requested in the argument list (via\n # getfixturevalue inside the fixture call) then ensure this fixture def will be finished\n # first\n if fixturedef.argname not in self.funcargnames:\n fixturedef.addfinalizer(\n functools.partial(self._fixturedef.finish, request=self)\n )\n super(SubRequest, self)._schedule_finalizers(fixturedef, subrequest)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_scopes_scope2index.try_.except_ValueError_.fail_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_scopes_scope2index.try_.except_ValueError_.fail_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 675, "end_line": 695, "span_ids": ["scope2index", "impl:10", "scopemismatch"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "scopes = \"session package module class function\".split()\nscopenum_function = scopes.index(\"function\")\n\n\ndef scopemismatch(currentscope, newscope):\n return scopes.index(newscope) > scopes.index(currentscope)\n\n\ndef scope2index(scope, descr, where=None):\n \"\"\"Look up the index of ``scope`` and raise a descriptive value error\n if not defined.\n \"\"\"\n try:\n return scopes.index(scope)\n except ValueError:\n fail(\n \"{} {}got an unexpected scope value '{}'\".format(\n descr, \"from {} \".format(where) if where else \"\", scope\n ),\n pytrace=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupError_FixtureLookupError.formatrepr.return.FixtureLookupErrorRepr_fs": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupError_FixtureLookupError.formatrepr.return.FixtureLookupErrorRepr_fs", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 698, "end_line": 749, "span_ids": ["FixtureLookupError", "FixtureLookupError.formatrepr"], "tokens": 475}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureLookupError(LookupError):\n \"\"\" could not return a requested Fixture (missing or invalid). \"\"\"\n\n def __init__(self, argname, request, msg=None):\n self.argname = argname\n self.request = request\n self.fixturestack = request._get_fixturestack()\n self.msg = msg\n\n def formatrepr(self):\n tblines = []\n addline = tblines.append\n stack = [self.request._pyfuncitem.obj]\n stack.extend(map(lambda x: x.func, self.fixturestack))\n msg = self.msg\n if msg is not None:\n # the last fixture raise an error, let's present\n # it at the requesting side\n stack = stack[:-1]\n for function in stack:\n fspath, lineno = getfslineno(function)\n try:\n lines, _ = inspect.getsourcelines(get_real_func(function))\n except (IOError, IndexError, TypeError):\n error_msg = \"file %s, line %s: source code not available\"\n addline(error_msg % (fspath, lineno + 1))\n else:\n addline(\"file %s, line %s\" % (fspath, lineno + 1))\n for i, line in enumerate(lines):\n line = line.rstrip()\n addline(\" \" + line)\n if line.lstrip().startswith(\"def\"):\n break\n\n if msg is None:\n fm = self.request._fixturemanager\n available = set()\n parentid = self.request._pyfuncitem.parent.nodeid\n for name, fixturedefs in fm._arg2fixturedefs.items():\n faclist = list(fm._matchfactories(fixturedefs, parentid))\n if faclist:\n available.add(name)\n if self.argname in available:\n msg = \" recursive dependency involving fixture '{}' detected\".format(\n self.argname\n )\n else:\n msg = \"fixture '{}' not found\".format(self.argname)\n msg += \"\\n available fixtures: {}\".format(\", \".join(sorted(available)))\n msg += \"\\n use 'pytest --fixtures [testpath]' for help on them.\"\n\n return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupErrorRepr_FixtureLookupErrorRepr.toterminal.tw_line_s_d_self_f": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureLookupErrorRepr_FixtureLookupErrorRepr.toterminal.tw_line_s_d_self_f", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 752, "end_line": 776, "span_ids": ["FixtureLookupErrorRepr.toterminal", "FixtureLookupErrorRepr"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureLookupErrorRepr(TerminalRepr):\n def __init__(self, filename, firstlineno, tblines, errorstring, argname):\n self.tblines = tblines\n self.errorstring = errorstring\n self.filename = filename\n self.firstlineno = firstlineno\n self.argname = argname\n\n def toterminal(self, tw):\n # tw.line(\"FixtureLookupError: %s\" %(self.argname), red=True)\n for tbline in self.tblines:\n tw.line(tbline.rstrip())\n lines = self.errorstring.split(\"\\n\")\n if lines:\n tw.line(\n \"{} {}\".format(FormattedExcinfo.fail_marker, lines[0].strip()),\n red=True,\n )\n for line in lines[1:]:\n tw.line(\n \"{} {}\".format(FormattedExcinfo.flow_marker, line.strip()),\n red=True,\n )\n tw.line()\n tw.line(\"%s:%d\" % (self.filename, self.firstlineno + 1))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fail_fixturefunc__teardown_yield_fixture.try_.else_.fail_fixturefunc_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fail_fixturefunc__teardown_yield_fixture.try_.else_.fail_fixturefunc_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 779, "end_line": 808, "span_ids": ["_teardown_yield_fixture", "fail_fixturefunc", "call_fixture_func"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fail_fixturefunc(fixturefunc, msg):\n fs, lineno = getfslineno(fixturefunc)\n location = \"%s:%s\" % (fs, lineno + 1)\n source = _pytest._code.Source(fixturefunc)\n fail(msg + \":\\n\\n\" + str(source.indent()) + \"\\n\" + location, pytrace=False)\n\n\ndef call_fixture_func(fixturefunc, request, kwargs):\n yieldctx = is_generator(fixturefunc)\n if yieldctx:\n it = fixturefunc(**kwargs)\n res = next(it)\n finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it)\n request.addfinalizer(finalizer)\n else:\n res = fixturefunc(**kwargs)\n return res\n\n\ndef _teardown_yield_fixture(fixturefunc, it):\n \"\"\"Executes the teardown of a fixture function by advancing the iterator after the\n yield and ensure the iteration ends (if not it means there is more than one yield in the function)\"\"\"\n try:\n next(it)\n except StopIteration:\n pass\n else:\n fail_fixturefunc(\n fixturefunc, \"yield_fixture function has more than one 'yield'\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef_FixtureDef.addfinalizer.self__finalizers_append_f": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef_FixtureDef.addfinalizer.self__finalizers_append_f", "embedding": 
null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 811, "end_line": 843, "span_ids": ["FixtureDef", "FixtureDef.addfinalizer"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureDef(object):\n \"\"\" A container for a factory definition. \"\"\"\n\n def __init__(\n self,\n fixturemanager,\n baseid,\n argname,\n func,\n scope,\n params,\n unittest=False,\n ids=None,\n ):\n self._fixturemanager = fixturemanager\n self.baseid = baseid or \"\"\n self.has_location = baseid is not None\n self.func = func\n self.argname = argname\n self.scope = scope\n self.scopenum = scope2index(\n scope or \"function\",\n descr=\"Fixture '{}'\".format(func.__name__),\n where=baseid,\n )\n self.params = params\n self.argnames = getfuncargnames(func, is_method=unittest)\n self.unittest = unittest\n self.ids = ids\n self._finalizers = []\n\n def addfinalizer(self, finalizer):\n self._finalizers.append(finalizer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.finish_FixtureDef.finish.try_.finally_.self._finalizers._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.finish_FixtureDef.finish.try_.finally_.self._finalizers._", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 845, "end_line": 870, "span_ids": ["FixtureDef.finish"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureDef(object):\n\n def finish(self, request):\n exceptions = []\n try:\n while self._finalizers:\n try:\n func = self._finalizers.pop()\n func()\n except: # noqa\n exceptions.append(sys.exc_info())\n if exceptions:\n e = exceptions[0]\n del (\n exceptions\n ) # ensure we don't keep all frames alive because of the traceback\n six.reraise(*e)\n\n finally:\n hook = self._fixturemanager.session.gethookproxy(request.node.fspath)\n hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)\n # even if finalization fails, we invalidate\n # the cached fixture value and remove\n # all finalizers because they may be bound methods which will\n # keep instances alive\n if hasattr(self, \"cached_result\"):\n del self.cached_result\n self._finalizers = []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.execute_FixtureDef.__repr__.return._FixtureDef_argname_r_s": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureDef.execute_FixtureDef.__repr__.return._FixtureDef_argname_r_s", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 872, "end_line": 902, "span_ids": ["FixtureDef.__repr__", "FixtureDef.execute"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureDef(object):\n\n def execute(self, request):\n # get required arguments and register our own finish()\n # with their finalization\n for argname in self.argnames:\n fixturedef = request._get_active_fixturedef(argname)\n if argname != \"request\":\n fixturedef.addfinalizer(functools.partial(self.finish, request=request))\n\n my_cache_key = request.param_index\n cached_result = getattr(self, \"cached_result\", None)\n if cached_result is not None:\n result, cache_key, err = cached_result\n if my_cache_key == cache_key:\n if err is not None:\n six.reraise(*err)\n else:\n return result\n # we have a previous but differently parametrized fixture instance\n # so we need to tear it down before creating a new one\n self.finish(request)\n assert not hasattr(self, \"cached_result\")\n\n hook = self._fixturemanager.session.gethookproxy(request.node.fspath)\n return hook.pytest_fixture_setup(fixturedef=self, request=request)\n\n def __repr__(self):\n return \"\" % (\n self.argname,\n self.scope,\n self.baseid,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_resolve_fixture_function_resolve_fixture_function.return.fixturefunc": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_resolve_fixture_function_resolve_fixture_function.return.fixturefunc", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 905, "end_line": 922, "span_ids": ["resolve_fixture_function"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def resolve_fixture_function(fixturedef, request):\n \"\"\"Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific\n instances and bound methods.\n \"\"\"\n fixturefunc = fixturedef.func\n if fixturedef.unittest:\n if request.instance is not None:\n # bind the unbound method to the TestCase instance\n fixturefunc = fixturedef.func.__get__(request.instance)\n else:\n # the fixture function needs to be bound to the actual\n # request.instance so that code working with \"fixturedef\" behaves\n # as expected.\n if request.instance is not None:\n fixturefunc = getimfunc(fixturedef.func)\n if fixturefunc != fixturedef.func:\n fixturefunc = 
fixturefunc.__get__(request.instance)\n return fixturefunc", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_pytest_fixture_setup_pytest_fixture_setup.return.result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_pytest_fixture_setup_pytest_fixture_setup.return.result", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 925, "end_line": 942, "span_ids": ["pytest_fixture_setup"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_fixture_setup(fixturedef, request):\n \"\"\" Execution of fixture setup. \"\"\"\n kwargs = {}\n for argname in fixturedef.argnames:\n fixdef = request._get_active_fixturedef(argname)\n result, arg_cache_key, exc = fixdef.cached_result\n request._check_scope(argname, request.scope, fixdef.scope)\n kwargs[argname] = result\n\n fixturefunc = resolve_fixture_function(fixturedef, request)\n my_cache_key = request.param_index\n try:\n result = call_fixture_func(fixturefunc, request, kwargs)\n except TEST_OUTCOME:\n fixturedef.cached_result = (None, my_cache_key, sys.exc_info())\n raise\n fixturedef.cached_result = (result, my_cache_key, None)\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__ensure_immutable_ids_wrap_function_to_error_out_if_called_directly.return.result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py__ensure_immutable_ids_wrap_function_to_error_out_if_called_directly.return.result", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 945, "end_line": 969, "span_ids": ["_ensure_immutable_ids", "wrap_function_to_error_out_if_called_directly"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _ensure_immutable_ids(ids):\n if ids is None:\n return\n if callable(ids):\n return ids\n return tuple(ids)\n\n\ndef wrap_function_to_error_out_if_called_directly(function, fixture_marker):\n \"\"\"Wrap the given fixture function so we can raise an error about it being called directly,\n instead of used as an argument in a test function.\n \"\"\"\n message = FIXTURE_FUNCTION_CALL.format(\n name=fixture_marker.name or function.__name__\n )\n\n @six.wraps(function)\n def result(*args, **kwargs):\n fail(message, pytrace=False)\n\n # keep reference to the original function in our own custom attribute so we don't unwrap\n # further than this point and 
lose useful wrappings like @mock.patch (#3774)\n result.__pytest_wrapped__ = _PytestWrapper(function)\n\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureFunctionMarker_FixtureFunctionMarker.__call__.return.function": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureFunctionMarker_FixtureFunctionMarker.__call__.return.function", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 972, "end_line": 995, "span_ids": ["FixtureFunctionMarker", "FixtureFunctionMarker.__call__"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(frozen=True)\nclass FixtureFunctionMarker(object):\n scope = attr.ib()\n params = attr.ib(converter=attr.converters.optional(tuple))\n autouse = attr.ib(default=False)\n ids = attr.ib(default=None, converter=_ensure_immutable_ids)\n name = attr.ib(default=None)\n\n def __call__(self, function):\n if isclass(function):\n raise ValueError(\"class fixtures not supported (maybe in the future)\")\n\n if getattr(function, \"_pytestfixturefunction\", False):\n raise ValueError(\n \"fixture is being applied more than once to the same function\"\n )\n\n function = wrap_function_to_error_out_if_called_directly(function, self)\n\n name = self.name or function.__name__\n if name == \"request\":\n warnings.warn(FIXTURE_NAMED_REQUEST)\n function._pytestfixturefunction = self\n return function", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fixture_fixture.return.FixtureFunctionMarker_sco": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_fixture_fixture.return.FixtureFunctionMarker_sco", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 998, "end_line": 1049, "span_ids": ["fixture"], "tokens": 550}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fixture(scope=\"function\", params=None, autouse=False, ids=None, name=None):\n \"\"\"Decorator to mark a fixture factory function.\n\n This decorator can be used, with or without parameters, to define a\n fixture function.\n\n The name of the fixture function can later be referenced to cause its\n invocation ahead of running tests: test\n modules or classes can use the ``pytest.mark.usefixtures(fixturename)``\n marker.\n\n Test functions can directly use fixture names as input\n arguments in which case the fixture instance 
returned from the fixture\n function will be injected.\n\n Fixtures can provide their values to test functions using ``return`` or ``yield``\n statements. When using ``yield`` the code block after the ``yield`` statement is executed\n as teardown code regardless of the test outcome, and must yield exactly once.\n\n :arg scope: the scope for which this fixture is shared, one of\n ``\"function\"`` (default), ``\"class\"``, ``\"module\"``,\n ``\"package\"`` or ``\"session\"``.\n\n ``\"package\"`` is considered **experimental** at this time.\n\n :arg params: an optional list of parameters which will cause multiple\n invocations of the fixture function and all of the tests\n using it.\n The current parameter is available in ``request.param``.\n\n :arg autouse: if True, the fixture func is activated for all tests that\n can see it. If False (the default) then an explicit\n reference is needed to activate the fixture.\n\n :arg ids: list of string ids each corresponding to the params\n so that they are part of the test id. If no ids are provided\n they will be generated automatically from the params.\n\n :arg name: the name of the fixture. This defaults to the name of the\n decorated function. If a fixture is used in the same module in\n which it is defined, the function name of the fixture will be\n shadowed by the function arg that requests the fixture; one way\n to resolve this is to name the decorated function\n ``fixture_`` and then use\n ``@pytest.fixture(name='')``.\n \"\"\"\n if callable(scope) and params is None and autouse is False:\n # direct decoration\n return FixtureFunctionMarker(\"function\", params, autouse, name=name)(scope)\n if params is not None and not isinstance(params, (list, tuple)):\n params = list(params)\n return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_yield_fixture_pytestconfig.return.request_config": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_yield_fixture_pytestconfig.return.request_config", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1052, "end_line": 1075, "span_ids": ["impl:14", "pytestconfig", "yield_fixture"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def yield_fixture(scope=\"function\", params=None, autouse=False, ids=None, name=None):\n \"\"\" (return a) decorator to mark a yield-fixture factory function.\n\n .. 
deprecated:: 3.0\n Use :py:func:`pytest.fixture` directly instead.\n \"\"\"\n return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name)\n\n\ndefaultfuncargprefixmarker = fixture()\n\n\n@fixture(scope=\"session\")\ndef pytestconfig(request):\n \"\"\"Session-scoped fixture that returns the :class:`_pytest.config.Config` object.\n\n Example::\n\n def test_foo(pytestconfig):\n if pytestconfig.getoption(\"verbose\") > 0:\n ...\n\n \"\"\"\n return request.config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager_FixtureManager.__init__.session_config_pluginmana": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager_FixtureManager.__init__.session_config_pluginmana", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1078, "end_line": 1120, "span_ids": ["FixtureManager"], "tokens": 357}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n \"\"\"\n pytest fixtures definitions and information is stored and managed\n from this class.\n\n During collection fm.parsefactories() is called multiple times to parse\n fixture function definitions into FixtureDef objects and internal\n data structures.\n\n During collection of test functions, metafunc-mechanics instantiate\n a FuncFixtureInfo object which is cached per node/func-name.\n This FuncFixtureInfo object is later retrieved by Function nodes\n which themselves offer a fixturenames attribute.\n\n The FuncFixtureInfo object holds information about fixtures and FixtureDefs\n relevant for a particular function. An initial list of fixtures is\n assembled like this:\n\n - ini-defined usefixtures\n - autouse-marked fixtures along the collection chain up from the function\n - usefixtures markers at module/class/function level\n - test function funcargs\n\n Subsequently the funcfixtureinfo.fixturenames attribute is computed\n as the closure of the fixtures needed to setup the initial fixtures,\n i. e. 
fixtures needed by fixture functions themselves are appended\n to the fixturenames list.\n\n Upon the test-setup phases all fixturenames are instantiated, retrieved\n by a lookup of their FuncFixtureInfo.\n \"\"\"\n\n FixtureLookupError = FixtureLookupError\n FixtureLookupErrorRepr = FixtureLookupErrorRepr\n\n def __init__(self, session):\n self.session = session\n self.config = session.config\n self._arg2fixturedefs = {}\n self._holderobjseen = set()\n self._arg2finish = {}\n self._nodeid_and_autousenames = [(\"\", self.config.getini(\"usefixtures\"))]\n session.config.pluginmanager.register(self, \"funcmanage\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureinfo_FixtureManager.getfixtureinfo.return.FuncFixtureInfo_argnames_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureinfo_FixtureManager.getfixtureinfo.return.FuncFixtureInfo_argnames_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1122, "end_line": 1135, "span_ids": ["FixtureManager.getfixtureinfo"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def getfixtureinfo(self, node, func, cls, funcargs=True):\n if funcargs and not getattr(node, \"nofuncargs\", False):\n argnames = getfuncargnames(func, cls=cls)\n else:\n argnames = ()\n usefixtures = itertools.chain.from_iterable(\n mark.args for mark in node.iter_markers(name=\"usefixtures\")\n )\n initialnames = tuple(usefixtures) + argnames\n fm = node.session._fixturemanager\n initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(\n initialnames, node\n )\n return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_plugin_registered_FixtureManager._getautousenames.return.autousenames": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_plugin_registered_FixtureManager._getautousenames.return.autousenames", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1137, "end_line": 1165, "span_ids": ["FixtureManager._getautousenames", "FixtureManager.pytest_plugin_registered"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def 
pytest_plugin_registered(self, plugin):\n nodeid = None\n try:\n p = py.path.local(plugin.__file__).realpath()\n except AttributeError:\n pass\n else:\n # construct the base nodeid which is later used to check\n # what fixtures are visible for particular tests (as denoted\n # by their test id)\n if p.basename.startswith(\"conftest.py\"):\n nodeid = p.dirpath().relto(self.config.rootdir)\n if p.sep != nodes.SEP:\n nodeid = nodeid.replace(p.sep, nodes.SEP)\n\n self.parsefactories(plugin, nodeid)\n\n def _getautousenames(self, nodeid):\n \"\"\" return a tuple of fixture names to be used. \"\"\"\n autousenames = []\n for baseid, basenames in self._nodeid_and_autousenames:\n if nodeid.startswith(baseid):\n if baseid:\n i = len(baseid)\n nextchar = nodeid[i : i + 1]\n if nextchar and nextchar not in \":/\":\n continue\n autousenames.extend(basenames)\n return autousenames", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureclosure_FixtureManager.getfixtureclosure.return.initialnames_fixturename": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixtureclosure_FixtureManager.getfixtureclosure.return.initialnames_fixturename", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1167, "end_line": 1211, "span_ids": ["FixtureManager.getfixtureclosure"], "tokens": 387}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def getfixtureclosure(self, fixturenames, parentnode):\n # collect the closure of all fixtures , starting with the given\n # fixturenames as the initial set. As we have to visit all\n # factory definitions anyway, we also return an arg2fixturedefs\n # mapping so that the caller can reuse it and does not have\n # to re-discover fixturedefs again for each fixturename\n # (discovering matching fixtures for a given name/node is expensive)\n\n parentid = parentnode.nodeid\n fixturenames_closure = self._getautousenames(parentid)\n\n def merge(otherlist):\n for arg in otherlist:\n if arg not in fixturenames_closure:\n fixturenames_closure.append(arg)\n\n merge(fixturenames)\n\n # at this point, fixturenames_closure contains what we call \"initialnames\",\n # which is a set of fixturenames the function immediately requests. 
We\n # need to return it as well, so save this.\n initialnames = tuple(fixturenames_closure)\n\n arg2fixturedefs = {}\n lastlen = -1\n while lastlen != len(fixturenames_closure):\n lastlen = len(fixturenames_closure)\n for argname in fixturenames_closure:\n if argname in arg2fixturedefs:\n continue\n fixturedefs = self.getfixturedefs(argname, parentid)\n if fixturedefs:\n arg2fixturedefs[argname] = fixturedefs\n merge(fixturedefs[-1].argnames)\n\n def sort_by_scope(arg_name):\n try:\n fixturedefs = arg2fixturedefs[arg_name]\n except KeyError:\n return scopes.index(\"function\")\n else:\n return fixturedefs[-1].scopenum\n\n fixturenames_closure.sort(key=sort_by_scope)\n return initialnames, fixturenames_closure, arg2fixturedefs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_generate_tests_FixtureManager.pytest_collection_modifyitems.items_reorder_items_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.pytest_generate_tests_FixtureManager.pytest_collection_modifyitems.items_reorder_items_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1213, "end_line": 1245, "span_ids": ["FixtureManager.pytest_collection_modifyitems", "FixtureManager.pytest_generate_tests"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def pytest_generate_tests(self, metafunc):\n for argname in metafunc.fixturenames:\n faclist = metafunc._arg2fixturedefs.get(argname)\n if faclist:\n fixturedef = faclist[-1]\n if fixturedef.params is not None:\n markers = list(metafunc.definition.iter_markers(\"parametrize\"))\n for parametrize_mark in markers:\n if \"argnames\" in parametrize_mark.kwargs:\n argnames = parametrize_mark.kwargs[\"argnames\"]\n else:\n argnames = parametrize_mark.args[0]\n\n if not isinstance(argnames, (tuple, list)):\n argnames = [\n x.strip() for x in argnames.split(\",\") if x.strip()\n ]\n if argname in argnames:\n break\n else:\n metafunc.parametrize(\n argname,\n fixturedef.params,\n indirect=True,\n scope=fixturedef.scope,\n ids=fixturedef.ids,\n )\n else:\n continue # will raise FixtureLookupError at setup time\n\n def pytest_collection_modifyitems(self, items):\n # separate parametrized setups\n items[:] = reorder_items(items)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.parsefactories_FixtureManager.parsefactories.if_autousenames_.self__nodeid_and_autousen": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.parsefactories_FixtureManager.parsefactories.if_autousenames_.self__nodeid_and_autousen", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": 
"text/x-python", "category": "implementation", "start_line": 1247, "end_line": 1305, "span_ids": ["FixtureManager.parsefactories"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):\n if nodeid is not NOTSET:\n holderobj = node_or_obj\n else:\n holderobj = node_or_obj.obj\n nodeid = node_or_obj.nodeid\n if holderobj in self._holderobjseen:\n return\n\n self._holderobjseen.add(holderobj)\n autousenames = []\n for name in dir(holderobj):\n # The attribute can be an arbitrary descriptor, so the attribute\n # access below can raise. safe_getatt() ignores such exceptions.\n obj = safe_getattr(holderobj, name, None)\n marker = getfixturemarker(obj)\n if not isinstance(marker, FixtureFunctionMarker):\n # magic globals with __getattr__ might have got us a wrong\n # fixture attribute\n continue\n\n if marker.name:\n name = marker.name\n\n # during fixture definition we wrap the original fixture function\n # to issue a warning if called directly, so here we unwrap it in order to not emit the warning\n # when pytest itself calls the fixture function\n if six.PY2 and unittest:\n # hack on Python 2 because of the unbound methods\n obj = get_real_func(obj)\n else:\n obj = get_real_method(obj, holderobj)\n\n fixture_def = FixtureDef(\n self,\n nodeid,\n name,\n obj,\n marker.scope,\n marker.params,\n unittest=unittest,\n ids=marker.ids,\n )\n\n faclist = self._arg2fixturedefs.setdefault(name, [])\n if fixture_def.has_location:\n faclist.append(fixture_def)\n else:\n # fixturedefs with no location are at the front\n # so this inserts the current fixturedef after the\n # existing fixturedefs from external plugins but\n # before the fixturedefs provided in conftests.\n i = len([f for f in faclist if not f.has_location])\n faclist.insert(i, fixture_def)\n if marker.autouse:\n autousenames.append(name)\n\n if autousenames:\n self._nodeid_and_autousenames.append((nodeid or \"\", autousenames))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixturedefs_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/fixtures.py_FixtureManager.getfixturedefs_", "embedding": null, "metadata": {"file_path": "src/_pytest/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1307, "end_line": 1325, "span_ids": ["FixtureManager._matchfactories", "FixtureManager.getfixturedefs"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FixtureManager(object):\n\n def getfixturedefs(self, argname, nodeid):\n \"\"\"\n Gets a list of fixtures which are applicable to the given node id.\n\n :param str argname: name of the fixture to search for\n 
:param str nodeid: full node id of the requesting test.\n :return: list[FixtureDef]\n \"\"\"\n try:\n fixturedefs = self._arg2fixturedefs[argname]\n except KeyError:\n return None\n return tuple(self._matchfactories(fixturedefs, nodeid))\n\n def _matchfactories(self, fixturedefs, nodeid):\n for fixturedef in fixturedefs:\n if nodes.ischildnode(fixturedef.baseid, nodeid):\n yield fixturedef", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/freeze_support.py___": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/freeze_support.py___", "embedding": null, "metadata": {"file_path": "src/_pytest/freeze_support.py", "file_name": "freeze_support.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 48, "span_ids": ["freeze_includes", "imports", "_iter_all_modules", "docstring"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nProvides a function to report all internal modules for using freezing tools\npytest\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\ndef freeze_includes():\n \"\"\"\n Returns a list of module names used by pytest that should be\n included by cx_freeze.\n \"\"\"\n import py\n import _pytest\n\n result = list(_iter_all_modules(py))\n result += list(_iter_all_modules(_pytest))\n return result\n\n\ndef _iter_all_modules(package, prefix=\"\"):\n \"\"\"\n Iterates over the names of all modules that can be found in the given\n package, recursively.\n Example:\n _iter_all_modules(_pytest) ->\n ['_pytest.assertion.newinterpret',\n '_pytest.capture',\n '_pytest.core',\n ...\n ]\n \"\"\"\n import os\n import pkgutil\n\n if type(package) is not str:\n path, prefix = package.__path__[0], package.__name__ + \".\"\n else:\n path = package\n for _, name, is_package in pkgutil.iter_modules([path]):\n if is_package:\n for m in _iter_all_modules(os.path.join(path, name), prefix=name + \".\"):\n yield prefix + m\n else:\n yield prefix + name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py__version_info_help_me_HelpAction.__call__.if_getattr_parser__parser.raise_PrintHelp": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py__version_info_help_me_HelpAction.__call__.if_getattr_parser__parser.raise_PrintHelp", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 40, "span_ids": ["imports", "docstring", "HelpAction", "HelpAction.__call__"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" version info, help messages, tracing configuration. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom argparse import Action\n\nimport py\n\nimport pytest\nfrom _pytest.config import PrintHelp\n\n\nclass HelpAction(Action):\n \"\"\"This is an argparse Action that will raise an exception in\n order to skip the rest of the argument parsing when --help is passed.\n This prevents argparse from quitting due to missing required arguments\n when any are defined, for example by ``pytest_addoption``.\n This is similar to the way that the builtin argparse --help option is\n implemented by raising SystemExit.\n \"\"\"\n\n def __init__(self, option_strings, dest=None, default=False, help=None):\n super(HelpAction, self).__init__(\n option_strings=option_strings,\n dest=dest,\n const=True,\n default=default,\n nargs=0,\n help=help,\n )\n\n def __call__(self, parser, namespace, values, option_string=None):\n setattr(namespace, self.dest, self.const)\n\n # We should only skip the rest of the parsing after preparse is done\n if getattr(parser._parser, \"after_preparse\", False):\n raise PrintHelp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_addoption_pytest_addoption.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_addoption_pytest_addoption.None_5", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 43, "end_line": 87, "span_ids": ["pytest_addoption"], "tokens": 297}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"debugconfig\")\n group.addoption(\n \"--version\",\n action=\"store_true\",\n help=\"display pytest lib version and import information.\",\n )\n group._addoption(\n \"-h\",\n \"--help\",\n action=HelpAction,\n dest=\"help\",\n help=\"show help message and configuration info\",\n )\n group._addoption(\n \"-p\",\n action=\"append\",\n dest=\"plugins\",\n default=[],\n metavar=\"name\",\n help=\"early-load given plugin module name or entry point (multi-allowed). \"\n \"To avoid loading of plugins, use the `no:` prefix, e.g. \"\n \"`no:doctest`.\",\n )\n group.addoption(\n \"--traceconfig\",\n \"--trace-config\",\n action=\"store_true\",\n default=False,\n help=\"trace considerations of conftest.py files.\",\n ),\n group.addoption(\n \"--debug\",\n action=\"store_true\",\n dest=\"debug\",\n default=False,\n help=\"store internal tracing debug information in 'pytestdebug.log'.\",\n )\n group._addoption(\n \"-o\",\n \"--override-ini\",\n dest=\"override_ini\",\n action=\"append\",\n help='override ini option with \"option=value\" style, e.g. 
`-o xfail_strict=True -o cache_dir=cache`.',\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_cmdline_parse_pytest_cmdline_parse.if_config_option_debug_.config_add_cleanup_unset_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_cmdline_parse_pytest_cmdline_parse.if_config_option_debug_.config_add_cleanup_unset_", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 90, "end_line": 118, "span_ids": ["pytest_cmdline_parse"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(hookwrapper=True)\ndef pytest_cmdline_parse():\n outcome = yield\n config = outcome.get_result()\n if config.option.debug:\n path = os.path.abspath(\"pytestdebug.log\")\n debugfile = open(path, \"w\")\n debugfile.write(\n \"versions pytest-%s, py-%s, \"\n \"python-%s\\ncwd=%s\\nargs=%s\\n\\n\"\n % (\n pytest.__version__,\n py.__version__,\n \".\".join(map(str, sys.version_info)),\n os.getcwd(),\n config._origargs,\n )\n )\n config.trace.root.setwriter(debugfile.write)\n undo_tracing = config.pluginmanager.enable_tracing()\n sys.stderr.write(\"writing pytestdebug information to %s\\n\" % path)\n\n def unset_tracing():\n debugfile.close()\n sys.stderr.write(\"wrote pytestdebug information to %s\\n\" % debugfile.name)\n config.trace.root.setwriter(None)\n undo_tracing()\n\n config.add_cleanup(unset_tracing)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showversion_pytest_cmdline_main.if_config_option_version_.elif_config_option_help_.return.0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showversion_pytest_cmdline_main.if_config_option_version_.elif_config_option_help_.return.0", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 121, "end_line": 140, "span_ids": ["pytest_cmdline_main", "showversion"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def showversion(config):\n p = py.path.local(pytest.__file__)\n sys.stderr.write(\n \"This is pytest version %s, imported from %s\\n\" % (pytest.__version__, p)\n )\n plugininfo = getpluginversioninfo(config)\n if plugininfo:\n for line in plugininfo:\n sys.stderr.write(line + \"\\n\")\n\n\ndef pytest_cmdline_main(config):\n if config.option.version:\n showversion(config)\n return 0\n elif config.option.help:\n 
config._do_configure()\n showhelp(config)\n config._ensure_unconfigure()\n return 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showhelp_showhelp.return": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_showhelp_showhelp.return", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 143, "end_line": 186, "span_ids": ["showhelp"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def showhelp(config):\n reporter = config.pluginmanager.get_plugin(\"terminalreporter\")\n tw = reporter._tw\n tw.write(config._parser.optparser.format_help())\n tw.line()\n tw.line()\n tw.line(\n \"[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:\"\n )\n tw.line()\n\n columns = tw.fullwidth # costly call\n for name in config._parser._ininames:\n help, type, default = config._parser._inidict[name]\n if type is None:\n type = \"string\"\n spec = \"%s (%s)\" % (name, type)\n line = \" %-24s %s\" % (spec, help)\n tw.line(line[:columns])\n\n tw.line()\n tw.line(\"environment variables:\")\n vars = [\n (\"PYTEST_ADDOPTS\", \"extra command line options\"),\n (\"PYTEST_PLUGINS\", \"comma-separated plugins to load during startup\"),\n (\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", \"set to disable plugin auto-loading\"),\n (\"PYTEST_DEBUG\", \"set to enable debug tracing of pytest's internals\"),\n ]\n for name, help in vars:\n tw.line(\" %-24s %s\" % (name, help))\n tw.line()\n tw.line()\n\n tw.line(\"to see available markers type: pytest --markers\")\n tw.line(\"to see available fixtures type: pytest --fixtures\")\n tw.line(\n \"(shown according to specified file_or_dir or current dir \"\n \"if not specified; fixtures with leading '_' are only shown \"\n \"with the '-v' option\"\n )\n\n for warningreport in reporter.stats.get(\"warnings\", []):\n tw.line(\"warning : \" + warningreport.message, red=True)\n return", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_conftest_options_getpluginversioninfo.return.lines": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_conftest_options_getpluginversioninfo.return.lines", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 225, "span_ids": ["impl", "getpluginversioninfo"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "conftest_options = [(\"pytest_plugins\", \"list of plugin names to load\")]\n\n\ndef getpluginversioninfo(config):\n lines = []\n plugininfo = config.pluginmanager.list_plugin_distinfo()\n if plugininfo:\n lines.append(\"setuptools registered plugins:\")\n for plugin, dist in plugininfo:\n loc = getattr(plugin, \"__file__\", repr(plugin))\n content = \"%s-%s at %s\" % (dist.project_name, dist.version, loc)\n lines.append(\" \" + content)\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_report_header_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/helpconfig.py_pytest_report_header_", "embedding": null, "metadata": {"file_path": "src/_pytest/helpconfig.py", "file_name": "helpconfig.py", "file_type": "text/x-python", "category": "implementation", "start_line": 228, "end_line": 247, "span_ids": ["pytest_report_header"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_report_header(config):\n lines = []\n if config.option.debug or config.option.traceconfig:\n lines.append(\"using: pytest-%s pylib-%s\" % (pytest.__version__, py.__version__))\n\n verinfo = getpluginversioninfo(config)\n if verinfo:\n lines.extend(verinfo)\n\n if config.option.traceconfig:\n lines.append(\"active plugins:\")\n items = config.pluginmanager.list_name_plugin()\n for name, plugin in items:\n if hasattr(plugin, \"__file__\"):\n r = plugin.__file__\n else:\n r = repr(plugin)\n lines.append(\" %-20s: %s\" % (name, r))\n return lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py__hook_specifications_f_pytest_plugin_registered._a_new_pytest_plugin_g": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py__hook_specifications_f_pytest_plugin_registered._a_new_pytest_plugin_g", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["impl", "pytest_plugin_registered", "docstring", "pytest_addhooks", "imports"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" hook specifications for pytest plugins, invoked from main.py and builtin plugins. 
\"\"\"\nfrom pluggy import HookspecMarker\n\nfrom _pytest.deprecated import PYTEST_LOGWARNING\n\nhookspec = HookspecMarker(\"pytest\")\n\n# -------------------------------------------------------------------------\n# Initialization hooks called for every plugin\n# -------------------------------------------------------------------------\n\n\n@hookspec(historic=True)\ndef pytest_addhooks(pluginmanager):\n \"\"\"called at plugin registration time to allow adding new hooks via a call to\n ``pluginmanager.add_hookspecs(module_or_class, prefix)``.\n\n\n :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager\n\n .. note::\n This hook is incompatible with ``hookwrapper=True``.\n \"\"\"\n\n\n@hookspec(historic=True)\ndef pytest_plugin_registered(plugin, manager):\n \"\"\" a new pytest plugin got registered.\n\n :param plugin: the plugin module or instance\n :param _pytest.config.PytestPluginManager manager: pytest plugin manager\n\n .. note::\n This hook is incompatible with ``hookwrapper=True``.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_addoption_pytest_addoption._register_argparse_styl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_addoption_pytest_addoption._register_argparse_styl", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 68, "span_ids": ["pytest_addoption"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(historic=True)\ndef pytest_addoption(parser):\n \"\"\"register argparse-style options and ini-style config values,\n called once at the beginning of a test run.\n\n .. note::\n\n This function should be implemented only in plugins or ``conftest.py``\n files situated at the tests root directory due to how pytest\n :ref:`discovers plugins during startup `.\n\n :arg _pytest.config.Parser parser: To add command line options, call\n :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.\n To add ini-file values call :py:func:`parser.addini(...)\n <_pytest.config.Parser.addini>`.\n\n Options can later be accessed through the\n :py:class:`config <_pytest.config.Config>` object, respectively:\n\n - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to\n retrieve the value of a command line option.\n\n - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve\n a value read from an ini-style file.\n\n The config object is passed around on many internal objects via the ``.config``\n attribute or can be retrieved as the ``pytestconfig`` fixture.\n\n .. 
note::\n This hook is incompatible with ``hookwrapper=True``.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_configure_pytest_configure._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_configure_pytest_configure._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 71, "end_line": 86, "span_ids": ["pytest_configure"], "tokens": 104}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(historic=True)\ndef pytest_configure(config):\n \"\"\"\n Allows plugins and conftest files to perform initial configuration.\n\n This hook is called for every plugin and initial conftest file\n after command line options have been parsed.\n\n After that, the hook is called for other conftest files as they are\n imported.\n\n .. note::\n This hook is incompatible with ``hookwrapper=True``.\n\n :arg _pytest.config.Config config: pytest config object\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_4_pytest_cmdline_parse._return_initialized_con": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_4_pytest_cmdline_parse._return_initialized_con", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 89, "end_line": 107, "span_ids": ["pytest_cmdline_parse", "pytest_configure"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# Bootstrapping hooks called for plugins registered early enough:\n# internal and 3rd party plugins.\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_cmdline_parse(pluginmanager, args):\n \"\"\"return initialized config object, parsing the specified args.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n .. 
note::\n This hook will only be called for plugin classes passed to the ``plugins`` arg when using `pytest.main`_ to\n perform an in-process test run.\n\n :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager\n :param list[str] args: list of arguments passed on the command line\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_preparse_pytest_cmdline_preparse._Deprecated_modif": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_preparse_pytest_cmdline_preparse._Deprecated_modif", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 110, "end_line": 121, "span_ids": ["pytest_cmdline_preparse"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_cmdline_preparse(config, args):\n \"\"\"(**Deprecated**) modify command line arguments before option parsing.\n\n This hook is considered deprecated and will be removed in a future pytest version. Consider\n using :func:`pytest_load_initial_conftests` instead.\n\n .. note::\n This hook will not be called for ``conftest.py`` files, only for setuptools plugins.\n\n :param _pytest.config.Config config: pytest config object\n :param list[str] args: list of arguments passed on the command line\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_main_pytest_load_initial_conftests._implements_the_loadin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_cmdline_main_pytest_load_initial_conftests._implements_the_loadin", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 148, "span_ids": ["pytest_cmdline_main", "pytest_load_initial_conftests"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_cmdline_main(config):\n \"\"\" called for performing the main command line action. The default\n implementation will invoke the configure hooks and runtest_mainloop.\n\n .. 
note::\n This hook will not be called for ``conftest.py`` files, only for setuptools plugins.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param _pytest.config.Config config: pytest config object\n \"\"\"\n\n\ndef pytest_load_initial_conftests(early_config, parser, args):\n \"\"\" implements the loading of initial conftest files ahead\n of command line option parsing.\n\n .. note::\n This hook will not be called for ``conftest.py`` files, only for setuptools plugins.\n\n :param _pytest.config.Config early_config: pytest config object\n :param list[str] args: list of arguments passed on the command line\n :param _pytest.config.Parser parser: to add command line options\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_8_pytest_make_collect_report._perform_collector_c": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_8_pytest_make_collect_report._perform_collector_c", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 151, "end_line": 237, "span_ids": ["pytest_collection_modifyitems", "pytest_itemcollected", "pytest_collection", "pytest_ignore_collect", "pytest_collection_finish", "pytest_deselected", "pytest_make_collect_report", "pytest_collectstart", "pytest_collect_directory", "pytest_collect_file", "pytest_load_initial_conftests", "pytest_collectreport"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# collection hooks\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_collection(session):\n \"\"\"Perform the collection protocol for the given session.\n\n Stops at first non-None result, see :ref:`firstresult`.\n\n :param _pytest.main.Session session: the pytest session object\n \"\"\"\n\n\ndef pytest_collection_modifyitems(session, config, items):\n \"\"\" called after collection has been performed, may filter or re-order\n the items in-place.\n\n :param _pytest.main.Session session: the pytest session object\n :param _pytest.config.Config config: pytest config object\n :param List[_pytest.nodes.Item] items: list of item objects\n \"\"\"\n\n\ndef pytest_collection_finish(session):\n \"\"\" called after collection has been performed and modified.\n\n :param _pytest.main.Session session: the pytest session object\n \"\"\"\n\n\n@hookspec(firstresult=True)\ndef pytest_ignore_collect(path, config):\n \"\"\" return True to prevent considering this path for collection.\n This hook is consulted for all files and directories prior to calling\n more specific hooks.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param path: a :py:class:`py.path.local` - the path to analyze\n :param _pytest.config.Config config: pytest config object\n \"\"\"\n\n\n@hookspec(firstresult=True)\ndef pytest_collect_directory(path, parent):\n \"\"\" called before traversing 
a directory for collection files.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param path: a :py:class:`py.path.local` - the path to analyze\n \"\"\"\n\n\ndef pytest_collect_file(path, parent):\n \"\"\" return collection Node or None for the given path. Any new node\n needs to have the specified ``parent`` as a parent.\n\n :param path: a :py:class:`py.path.local` - the path to collect\n \"\"\"\n\n\n# logging hooks for collection\n\n\ndef pytest_collectstart(collector):\n \"\"\" collector starts collecting. \"\"\"\n\n\ndef pytest_itemcollected(item):\n \"\"\" we just collected a test item. \"\"\"\n\n\ndef pytest_collectreport(report):\n \"\"\" collector finished collecting. \"\"\"\n\n\ndef pytest_deselected(items):\n \"\"\" called for test items deselected, e.g. by keyword. \"\"\"\n\n\n@hookspec(firstresult=True)\ndef pytest_make_collect_report(collector):\n \"\"\" perform ``collector.collect()`` and return a CollectReport.\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_12_pytest_pycollect_makemodule._return_a_Module_colle": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_12_pytest_pycollect_makemodule._return_a_Module_colle", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 240, "end_line": 255, "span_ids": ["pytest_pycollect_makemodule", "pytest_make_collect_report"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# Python test function related hooks\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_pycollect_makemodule(path, parent):\n \"\"\" return a Module collector or None for the given path.\n This hook will be called for each matching test module path.\n The pytest_collect_file hook needs to be used if you want to\n create test modules for files that do not match as a test module.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param path: a :py:class:`py.path.local` - the path of module to collect\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_pycollect_makeitem_pytest_generate_tests._generate_multiple_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_pycollect_makeitem_pytest_generate_tests._generate_multiple_p", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 273, "span_ids": ["pytest_pycollect_makeitem", "pytest_generate_tests", "pytest_pyfunc_call"], "tokens": 119}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_pycollect_makeitem(collector, name, obj):\n \"\"\" return custom item/collector for a python object in a module, or None.\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n\n\n@hookspec(firstresult=True)\ndef pytest_pyfunc_call(pyfuncitem):\n \"\"\" call underlying test function.\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n\n\ndef pytest_generate_tests(metafunc):\n \"\"\" generate (multiple) parametrized calls to a test function.\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_make_parametrize_id_pytest_itemstart._Deprecated_use_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_make_parametrize_id_pytest_itemstart._Deprecated_use_p", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 276, "end_line": 307, "span_ids": ["pytest_itemstart", "pytest_make_parametrize_id", "pytest_runtestloop"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_make_parametrize_id(config, val, argname):\n \"\"\"Return a user-friendly string representation of the given ``val`` that will be used\n by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.\n The parameter name is available as ``argname``, if required.\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param _pytest.config.Config config: pytest config object\n :param val: the parametrized value\n :param str argname: the automatic parameter name produced by pytest\n \"\"\"\n\n\n# -------------------------------------------------------------------------\n# generic runtest related hooks\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_runtestloop(session):\n \"\"\" called for performing the main runtest loop\n (after collection finished).\n\n Stops at first non-None result, see :ref:`firstresult`\n\n :param _pytest.main.Session session: the pytest session object\n \"\"\"\n\n\ndef pytest_itemstart(item, node):\n \"\"\"(**Deprecated**) use pytest_runtest_logstart. 
\"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_protocol_pytest_runtest_protocol._implements_the_runtes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_protocol_pytest_runtest_protocol._implements_the_runtes", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 310, "end_line": 325, "span_ids": ["pytest_runtest_protocol"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_runtest_protocol(item, nextitem):\n \"\"\" implements the runtest_setup/call/teardown protocol for\n the given test item, including capturing exceptions and calling\n reporting hooks.\n\n :arg item: test item for which the runtest protocol is performed.\n\n :arg nextitem: the scheduled-to-be-next test item (or None if this\n is the end my friend). This argument is passed on to\n :py:func:`pytest_runtest_teardown`.\n\n :return boolean: True if no further hook implementations should be invoked.\n\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_logstart_pytest_runtest_logreport._process_a_test_setup_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_runtest_logstart_pytest_runtest_logreport._process_a_test_setup_", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 379, "span_ids": ["pytest_runtest_logstart", "pytest_runtest_logreport", "pytest_runtest_logfinish", "pytest_runtest_makereport", "pytest_runtest_teardown", "pytest_runtest_setup", "pytest_runtest_call"], "tokens": 435}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_runtest_logstart(nodeid, location):\n \"\"\" signal the start of running a single test item.\n\n This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and\n :func:`pytest_runtest_teardown` hooks.\n\n :param str nodeid: full id of the item\n :param location: a triple of ``(filename, linenum, testname)``\n \"\"\"\n\n\ndef pytest_runtest_logfinish(nodeid, location):\n \"\"\" signal the complete finish of running a single test item.\n\n This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and\n :func:`pytest_runtest_teardown` hooks.\n\n :param str nodeid: 
full id of the item\n :param location: a triple of ``(filename, linenum, testname)``\n \"\"\"\n\n\ndef pytest_runtest_setup(item):\n \"\"\" called before ``pytest_runtest_call(item)``. \"\"\"\n\n\ndef pytest_runtest_call(item):\n \"\"\" called to execute the test ``item``. \"\"\"\n\n\ndef pytest_runtest_teardown(item, nextitem):\n \"\"\" called after ``pytest_runtest_call``.\n\n :arg nextitem: the scheduled-to-be-next test item (None if no further\n test item is scheduled). This argument can be used to\n perform exact teardowns, i.e. calling just enough finalizers\n so that nextitem only needs to call setup-functions.\n \"\"\"\n\n\n@hookspec(firstresult=True)\ndef pytest_runtest_makereport(item, call):\n \"\"\" return a :py:class:`_pytest.runner.TestReport` object\n for the given :py:class:`pytest.Item <_pytest.main.Item>` and\n :py:class:`_pytest.runner.CallInfo`.\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n\n\ndef pytest_runtest_logreport(report):\n \"\"\" process a test setup/call/teardown report relating to\n the respective phase of executing a test. \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_to_serializable_pytest_report_to_serializable._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_to_serializable_pytest_report_to_serializable._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 382, "end_line": 397, "span_ids": ["pytest_report_to_serializable"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_report_to_serializable(config, report):\n \"\"\"\n .. warning::\n This hook is experimental and subject to change between pytest releases, even\n bug fixes.\n\n The intent is for this to be used by plugins maintained by the core-devs, such\n as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal\n 'resultlog' plugin.\n\n In the future it might become part of the public hook API.\n\n Serializes the given report object into a data structure suitable for sending\n over the wire, e.g. 
converted to JSON.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_from_serializable_pytest_report_from_serializable._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_from_serializable_pytest_report_from_serializable._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 400, "end_line": 414, "span_ids": ["pytest_report_from_serializable"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_report_from_serializable(config, data):\n \"\"\"\n .. warning::\n This hook is experimental and subject to change between pytest releases, even\n bug fixes.\n\n The intent is for this to be used by plugins maintained by the core-devs, such\n as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal\n 'resultlog' plugin.\n\n In the future it might become part of the public hook API.\n\n Restores a report object previously serialized with pytest_report_to_serializable().\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_18_pytest_unconfigure._called_before_test_pr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_18_pytest_unconfigure._called_before_test_pr", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 417, "end_line": 468, "span_ids": ["pytest_report_from_serializable", "pytest_fixture_post_finalizer", "pytest_sessionfinish", "pytest_unconfigure", "pytest_sessionstart", "pytest_fixture_setup"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# Fixture related hooks\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_fixture_setup(fixturedef, request):\n \"\"\" performs fixture setup execution.\n\n :return: The return value of the call to the fixture function\n\n Stops at first non-None result, see :ref:`firstresult`\n\n .. 
note::\n If the fixture function returns None, other implementations of\n this hook function will continue to be called, according to the\n behavior of the :ref:`firstresult` option.\n \"\"\"\n\n\ndef pytest_fixture_post_finalizer(fixturedef, request):\n \"\"\" called after fixture teardown, but before the cache is cleared so\n the fixture result cache ``fixturedef.cached_result`` can\n still be accessed.\"\"\"\n\n\n# -------------------------------------------------------------------------\n# test session related hooks\n# -------------------------------------------------------------------------\n\n\ndef pytest_sessionstart(session):\n \"\"\" called after the ``Session`` object has been created and before performing collection\n and entering the run test loop.\n\n :param _pytest.main.Session session: the pytest session object\n \"\"\"\n\n\ndef pytest_sessionfinish(session, exitstatus):\n \"\"\" called after whole test run finished, right before returning the exit status to the system.\n\n :param _pytest.main.Session session: the pytest session object\n :param int exitstatus: the status which pytest will return to the system\n \"\"\"\n\n\ndef pytest_unconfigure(config):\n \"\"\" called before test process is exited.\n\n :param _pytest.config.Config config: pytest config object\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_24_pytest_assertrepr_compare._return_explanation_for": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_24_pytest_assertrepr_compare._return_explanation_for", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 471, "end_line": 485, "span_ids": ["pytest_assertrepr_compare", "pytest_unconfigure"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# hooks for customizing the assert methods\n# -------------------------------------------------------------------------\n\n\ndef pytest_assertrepr_compare(config, op, left, right):\n \"\"\"return explanation for comparisons in failing assert expressions.\n\n Return None for no custom explanation, otherwise return a list\n of strings. The strings will be joined by newlines but any newlines\n *in* a string will be escaped. 
Note that all but the first line will\n be indented slightly, the intention is for the first line to be a summary.\n\n :param _pytest.config.Config config: pytest config object\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_27_pytest_report_header._return_a_string_or_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_27_pytest_report_header._return_a_string_or_li", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 488, "end_line": 504, "span_ids": ["pytest_assertrepr_compare", "pytest_report_header"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# hooks for influencing reporting (invoked from _pytest_terminal)\n# -------------------------------------------------------------------------\n\n\ndef pytest_report_header(config, startdir):\n \"\"\" return a string or list of strings to be displayed as header info for terminal reporting.\n\n :param _pytest.config.Config config: pytest config object\n :param startdir: py.path object with the starting dir\n\n .. note::\n\n This function should be implemented only in plugins or ``conftest.py``\n files situated at the tests root directory due to how pytest\n :ref:`discovers plugins during startup `.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_collectionfinish_pytest_report_collectionfinish._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_collectionfinish_pytest_report_collectionfinish._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 507, "end_line": 518, "span_ids": ["pytest_report_collectionfinish"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_report_collectionfinish(config, startdir, items):\n \"\"\"\n .. 
versionadded:: 3.2\n\n return a string or list of strings to be displayed after collection has finished successfully.\n\n This strings will be displayed after the standard \"collected X items\" message.\n\n :param _pytest.config.Config config: pytest config object\n :param startdir: py.path object with the starting dir\n :param items: list of pytest items that are going to be executed; this list should not be modified.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_teststatus_pytest_terminal_summary._Add_a_section_to_termi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_report_teststatus_pytest_terminal_summary._Add_a_section_to_termi", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 521, "end_line": 539, "span_ids": ["pytest_report_teststatus", "pytest_terminal_summary"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(firstresult=True)\ndef pytest_report_teststatus(report, config):\n \"\"\" return result-category, shortletter and verbose word for reporting.\n\n :param _pytest.config.Config config: pytest config object\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n\n\ndef pytest_terminal_summary(terminalreporter, exitstatus, config):\n \"\"\"Add a section to terminal summary reporting.\n\n :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object\n :param int exitstatus: the exit status that will be reported back to the OS\n :param _pytest.config.Config config: pytest config object\n\n .. versionadded:: 4.2\n The ``config`` parameter.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_logwarning_pytest_logwarning._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_logwarning_pytest_logwarning._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 542, "end_line": 560, "span_ids": ["pytest_logwarning"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING)\ndef pytest_logwarning(message, code, nodeid, fslocation):\n \"\"\"\n .. 
deprecated:: 3.8\n\n This hook is will stop working in a future release.\n\n pytest no longer triggers this hook, but the\n terminal writer still implements it to display warnings issued by\n :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be\n an error in future releases.\n\n process a warning specified by a message, a code string,\n a nodeid and fslocation (both of which may be None\n if the warning is not tied to a particular node/location).\n\n .. note::\n This hook is incompatible with ``hookwrapper=True``.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_warning_captured_pytest_warning_captured._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_pytest_warning_captured_pytest_warning_captured._", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 563, "end_line": 584, "span_ids": ["pytest_warning_captured"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookspec(historic=True)\ndef pytest_warning_captured(warning_message, when, item):\n \"\"\"\n Process a warning captured by the internal pytest warnings plugin.\n\n :param warnings.WarningMessage warning_message:\n The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains\n the same attributes as the parameters of :py:func:`warnings.showwarning`.\n\n :param str when:\n Indicates when the warning was captured. 
Possible values:\n\n * ``\"config\"``: during pytest configuration/initialization stage.\n * ``\"collect\"``: during test collection.\n * ``\"runtest\"``: during test execution.\n\n :param pytest.Item|None item:\n **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None``\n in a future release.\n\n The item being executed if ``when`` is ``\"runtest\"``, otherwise ``None``.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_30_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/hookspec.py_None_30_", "embedding": null, "metadata": {"file_path": "src/_pytest/hookspec.py", "file_name": "hookspec.py", "file_type": "text/x-python", "category": "implementation", "start_line": 587, "end_line": 639, "span_ids": ["pytest_enter_pdb", "pytest_leave_pdb", "pytest_doctest_prepare_content", "pytest_internalerror", "pytest_exception_interact", "pytest_keyboard_interrupt", "pytest_warning_captured"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -------------------------------------------------------------------------\n# doctest hooks\n# -------------------------------------------------------------------------\n\n\n@hookspec(firstresult=True)\ndef pytest_doctest_prepare_content(content):\n \"\"\" return processed content for a given doctest\n\n Stops at first non-None result, see :ref:`firstresult` \"\"\"\n\n\n# -------------------------------------------------------------------------\n# error handling and internal debugging hooks\n# -------------------------------------------------------------------------\n\n\ndef pytest_internalerror(excrepr, excinfo):\n \"\"\" called for internal errors. \"\"\"\n\n\ndef pytest_keyboard_interrupt(excinfo):\n \"\"\" called for keyboard interrupt. \"\"\"\n\n\ndef pytest_exception_interact(node, call, report):\n \"\"\"called when an exception was raised which can potentially be\n interactively handled.\n\n This hook is only called if an exception was raised\n that is not an internal exception like ``skip.Exception``.\n \"\"\"\n\n\ndef pytest_enter_pdb(config, pdb):\n \"\"\" called upon pdb.set_trace(), can be used by plugins to take special\n action just before the python debugger enters in interactive mode.\n\n :param _pytest.config.Config config: pytest config object\n :param pdb.Pdb pdb: Pdb instance\n \"\"\"\n\n\ndef pytest_leave_pdb(config, pdb):\n \"\"\" called when leaving pdb (e.g. 
with continue after pdb.set_trace()).\n\n Can be used by plugins to take special action just after the python\n debugger leaves interactive mode.\n\n :param _pytest.config.Config config: pytest config object\n :param pdb.Pdb pdb: Pdb instance\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py___families_xunit2_fami": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py___families_xunit2_fami", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 88, "span_ids": ["impl", "bin_xml_escape", "merge_family", "imports:14", "docstring", "Junit", "impl:2", "impl:3", "imports", "impl:17"], "tokens": 687}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\n report test results in JUnit-XML format,\n for use with Jenkins and build integration servers.\n\n\nBased on initial code from Ross Lawley.\n\nOutput conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/\nsrc/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport os\nimport re\nimport sys\nimport time\n\nimport py\nimport six\n\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.config import filename_arg\n\n# Python 2.X and 3.X compatibility\nif sys.version_info[0] < 3:\n from codecs import open\n\n\nclass Junit(py.xml.Namespace):\n pass\n\n\n# We need to get the subset of the invalid unicode ranges according to\n# XML 1.0 which are valid in this python build. Hence we calculate\n# this dynamically instead of hardcoding it. 
The spec range of valid\n# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]\n# | [#x10000-#x10FFFF]\n_legal_chars = (0x09, 0x0A, 0x0D)\n_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF))\n_legal_xml_re = [\n u\"%s-%s\" % (six.unichr(low), six.unichr(high))\n for (low, high) in _legal_ranges\n if low < sys.maxunicode\n]\n_legal_xml_re = [six.unichr(x) for x in _legal_chars] + _legal_xml_re\nillegal_xml_re = re.compile(u\"[^%s]\" % u\"\".join(_legal_xml_re))\ndel _legal_chars\ndel _legal_ranges\ndel _legal_xml_re\n\n_py_ext_re = re.compile(r\"\\.py$\")\n\n\ndef bin_xml_escape(arg):\n def repl(matchobj):\n i = ord(matchobj.group())\n if i <= 0xFF:\n return u\"#x%02X\" % i\n else:\n return u\"#x%04X\" % i\n\n return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))\n\n\ndef merge_family(left, right):\n result = {}\n for kl, vl in left.items():\n for kr, vr in right.items():\n if not isinstance(vl, list):\n raise TypeError(type(vl))\n result[kl] = vl + vr\n left.update(result)\n\n\nfamilies = {}\nfamilies[\"_base\"] = {\"testcase\": [\"classname\", \"name\"]}\nfamilies[\"_base_legacy\"] = {\"testcase\": [\"file\", \"line\", \"url\"]}\n\n# xUnit 1.x inherits legacy attributes\nfamilies[\"xunit1\"] = families[\"_base\"].copy()\nmerge_family(families[\"xunit1\"], families[\"_base_legacy\"])\n\n# xUnit 2.x uses strict base attributes\nfamilies[\"xunit2\"] = families[\"_base\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter__NodeReporter.make_properties_node.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter__NodeReporter.make_properties_node.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 91, "end_line": 123, "span_ids": ["_NodeReporter.add_property", "_NodeReporter", "_NodeReporter.append", "_NodeReporter.make_properties_node", "_NodeReporter.add_attribute"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _NodeReporter(object):\n def __init__(self, nodeid, xml):\n self.id = nodeid\n self.xml = xml\n self.add_stats = self.xml.add_stats\n self.family = self.xml.family\n self.duration = 0\n self.properties = []\n self.nodes = []\n self.testcase = None\n self.attrs = {}\n\n def append(self, node):\n self.xml.add_stats(type(node).__name__)\n self.nodes.append(node)\n\n def add_property(self, name, value):\n self.properties.append((str(name), bin_xml_escape(value)))\n\n def add_attribute(self, name, value):\n self.attrs[str(name)] = bin_xml_escape(value)\n\n def make_properties_node(self):\n \"\"\"Return a Junit node containing custom properties, if any.\n \"\"\"\n if self.properties:\n return Junit.properties(\n [\n Junit.property(name=name, value=value)\n for name, value in self.properties\n ]\n )\n return \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.record_testreport__NodeReporter._add_simple.self_append_node_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.record_testreport__NodeReporter._add_simple.self_append_node_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 166, "span_ids": ["_NodeReporter._add_simple", "_NodeReporter.to_xml", "_NodeReporter.record_testreport"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _NodeReporter(object):\n\n def record_testreport(self, testreport):\n assert not self.testcase\n names = mangle_test_address(testreport.nodeid)\n existing_attrs = self.attrs\n classnames = names[:-1]\n if self.xml.prefix:\n classnames.insert(0, self.xml.prefix)\n attrs = {\n \"classname\": \".\".join(classnames),\n \"name\": bin_xml_escape(names[-1]),\n \"file\": testreport.location[0],\n }\n if testreport.location[1] is not None:\n attrs[\"line\"] = testreport.location[1]\n if hasattr(testreport, \"url\"):\n attrs[\"url\"] = testreport.url\n self.attrs = attrs\n self.attrs.update(existing_attrs) # restore any user-defined attributes\n\n # Preserve legacy testcase behavior\n if self.family == \"xunit1\":\n return\n\n # Filter out attributes not permitted by this test family.\n # Including custom attributes because they are not valid here.\n temp_attrs = {}\n for key in self.attrs.keys():\n if key in families[self.family][\"testcase\"]:\n temp_attrs[key] = self.attrs[key]\n self.attrs = temp_attrs\n\n def to_xml(self):\n testcase = Junit.testcase(time=\"%.3f\" % self.duration, **self.attrs)\n testcase.append(self.make_properties_node())\n for node in self.nodes:\n testcase.append(node)\n return testcase\n\n def _add_simple(self, kind, message, data=None):\n data = bin_xml_escape(data)\n node = kind(data, message=message)\n self.append(node)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.write_captured_output__NodeReporter.write_captured_output.None_1.if_content_.self_append_tag_bin_xml_e": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.write_captured_output__NodeReporter.write_captured_output.None_1.if_content_.self_append_tag_bin_xml_e", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 168, "end_line": 216, "span_ids": ["_NodeReporter.write_captured_output"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
_NodeReporter(object):\n\n def write_captured_output(self, report):\n content_out = report.capstdout\n content_log = report.caplog\n content_err = report.capstderr\n\n if content_log or content_out:\n if content_log and self.xml.logging == \"system-out\":\n if content_out:\n # syncing stdout and the log-output is not done yet. It's\n # probably not worth the effort. Therefore, first the captured\n # stdout is shown and then the captured logs.\n content = \"\\n\".join(\n [\n \" Captured Stdout \".center(80, \"-\"),\n content_out,\n \"\",\n \" Captured Log \".center(80, \"-\"),\n content_log,\n ]\n )\n else:\n content = content_log\n else:\n content = content_out\n\n if content:\n tag = getattr(Junit, \"system-out\")\n self.append(tag(bin_xml_escape(content)))\n\n if content_log or content_err:\n if content_log and self.xml.logging == \"system-err\":\n if content_err:\n content = \"\\n\".join(\n [\n \" Captured Stderr \".center(80, \"-\"),\n content_err,\n \"\",\n \" Captured Log \".center(80, \"-\"),\n content_log,\n ]\n )\n else:\n content = content_log\n else:\n content = content_err\n\n if content:\n tag = getattr(Junit, \"system-err\")\n self.append(tag(bin_xml_escape(content)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_pass__NodeReporter.append_failure.if_hasattr_report_wasxf.else_.self_append_fail_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_pass__NodeReporter.append_failure.if_hasattr_report_wasxf.else_.self_append_fail_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 218, "end_line": 235, "span_ids": ["_NodeReporter.append_pass", "_NodeReporter.append_failure"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _NodeReporter(object):\n\n def append_pass(self, report):\n self.add_stats(\"passed\")\n\n def append_failure(self, report):\n # msg = str(report.longrepr.reprtraceback.extraline)\n if hasattr(report, \"wasxfail\"):\n self._add_simple(Junit.skipped, \"xfail-marked test passes unexpectedly\")\n else:\n if hasattr(report.longrepr, \"reprcrash\"):\n message = report.longrepr.reprcrash.message\n elif isinstance(report.longrepr, six.string_types):\n message = report.longrepr\n else:\n message = str(report.longrepr)\n message = bin_xml_escape(message)\n fail = Junit.failure(message=message)\n fail.append(bin_xml_escape(report.longrepr))\n self.append(fail)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_collect_error__NodeReporter.append_error.self__add_simple_Junit_er": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_collect_error__NodeReporter.append_error.self__add_simple_Junit_er", "embedding": 
null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 237, "end_line": 251, "span_ids": ["_NodeReporter.append_collect_skipped", "_NodeReporter.append_collect_error", "_NodeReporter.append_error"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _NodeReporter(object):\n\n def append_collect_error(self, report):\n # msg = str(report.longrepr.reprtraceback.extraline)\n self.append(\n Junit.error(bin_xml_escape(report.longrepr), message=\"collection failure\")\n )\n\n def append_collect_skipped(self, report):\n self._add_simple(Junit.skipped, \"collection skipped\", report.longrepr)\n\n def append_error(self, report):\n if report.when == \"teardown\":\n msg = \"test teardown failure\"\n else:\n msg = \"test setup failure\"\n self._add_simple(Junit.error, msg, report.longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_skipped__NodeReporter.finalize.self.to_xml.lambda_py_xml_raw_data_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__NodeReporter.append_skipped__NodeReporter.finalize.self.to_xml.lambda_py_xml_raw_data_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 253, "end_line": 281, "span_ids": ["_NodeReporter.append_skipped", "_NodeReporter.finalize"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _NodeReporter(object):\n\n def append_skipped(self, report):\n if hasattr(report, \"wasxfail\"):\n xfailreason = report.wasxfail\n if xfailreason.startswith(\"reason: \"):\n xfailreason = xfailreason[8:]\n self.append(\n Junit.skipped(\n \"\", type=\"pytest.xfail\", message=bin_xml_escape(xfailreason)\n )\n )\n else:\n filename, lineno, skipreason = report.longrepr\n if skipreason.startswith(\"Skipped: \"):\n skipreason = skipreason[9:]\n details = \"%s:%s: %s\" % (filename, lineno, skipreason)\n\n self.append(\n Junit.skipped(\n bin_xml_escape(details),\n type=\"pytest.skip\",\n message=bin_xml_escape(skipreason),\n )\n )\n self.write_captured_output(report)\n\n def finalize(self):\n data = self.to_xml().unicode(indent=0)\n self.__dict__.clear()\n self.to_xml = lambda: py.xml.raw(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__warn_incompatibility_with_xunit2__warn_incompatibility_with_xunit2.if_xml_is_not_None_and_xm.request_node_warn_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py__warn_incompatibility_with_xunit2__warn_incompatibility_with_xunit2.if_xml_is_not_None_and_xm.request_node_warn_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 296, "span_ids": ["_warn_incompatibility_with_xunit2"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _warn_incompatibility_with_xunit2(request, fixture_name):\n \"\"\"Emits a PytestWarning about the given fixture being incompatible with newer xunit revisions\"\"\"\n from _pytest.warning_types import PytestWarning\n\n xml = getattr(request.config, \"_xml\", None)\n if xml is not None and xml.family not in (\"xunit1\", \"legacy\"):\n request.node.warn(\n PytestWarning(\n \"{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')\".format(\n fixture_name=fixture_name, family=xml.family\n )\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_property_record_property.return.append_property": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_property_record_property.return.append_property", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 317, "span_ids": ["record_property"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef record_property(request):\n \"\"\"Add an extra properties the calling test.\n User properties become part of the test report and are available to the\n configured reporters, like JUnit XML.\n The fixture is callable with ``(name, value)``, with value being automatically\n xml-encoded.\n\n Example::\n\n def test_function(record_property):\n record_property(\"example_key\", 1)\n \"\"\"\n _warn_incompatibility_with_xunit2(request, \"record_property\")\n\n def append_property(name, value):\n request.node.user_properties.append((name, value))\n\n return append_property", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_addoption_pytest_addoption.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_addoption_pytest_addoption.None_5", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 387, "end_line": 425, "span_ids": 
["pytest_addoption"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"terminal reporting\")\n group.addoption(\n \"--junitxml\",\n \"--junit-xml\",\n action=\"store\",\n dest=\"xmlpath\",\n metavar=\"path\",\n type=functools.partial(filename_arg, optname=\"--junitxml\"),\n default=None,\n help=\"create junit-xml style report file at given path.\",\n )\n group.addoption(\n \"--junitprefix\",\n \"--junit-prefix\",\n action=\"store\",\n metavar=\"str\",\n default=None,\n help=\"prepend prefix to classnames in junit-xml output\",\n )\n parser.addini(\n \"junit_suite_name\", \"Test suite name for JUnit report\", default=\"pytest\"\n )\n parser.addini(\n \"junit_logging\",\n \"Write captured log messages to JUnit report: \"\n \"one of no|system-out|system-err\",\n default=\"no\",\n ) # choices=['no', 'stdout', 'stderr'])\n parser.addini(\n \"junit_duration_report\",\n \"Duration time to report: one of total|call\",\n default=\"total\",\n ) # choices=['total', 'call'])\n parser.addini(\n \"junit_family\",\n \"Emit XML for schema: one of legacy|xunit1|xunit2\",\n default=\"xunit1\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_configure_pytest_configure.if_xmlpath_and_not_hasatt.config_pluginmanager_regi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_configure_pytest_configure.if_xmlpath_and_not_hasatt.config_pluginmanager_regi", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 428, "end_line": 440, "span_ids": ["pytest_configure"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n xmlpath = config.option.xmlpath\n # prevent opening xmllog on slave nodes (xdist)\n if xmlpath and not hasattr(config, \"slaveinput\"):\n config._xml = LogXML(\n xmlpath,\n config.option.junitprefix,\n config.getini(\"junit_suite_name\"),\n config.getini(\"junit_logging\"),\n config.getini(\"junit_duration_report\"),\n config.getini(\"junit_family\"),\n )\n config.pluginmanager.register(config._xml)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_unconfigure_mangle_test_address.return.names": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_pytest_unconfigure_mangle_test_address.return.names", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 443, "end_line": 462, "span_ids": ["pytest_unconfigure", "mangle_test_address"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_unconfigure(config):\n xml = getattr(config, \"_xml\", None)\n if xml:\n del config._xml\n config.pluginmanager.unregister(xml)\n\n\ndef mangle_test_address(address):\n path, possible_open_bracket, params = address.partition(\"[\")\n names = path.split(\"::\")\n try:\n names.remove(\"()\")\n except ValueError:\n pass\n # convert file path to dotted path\n names[0] = names[0].replace(nodes.SEP, \".\")\n names[0] = _py_ext_re.sub(\"\", names[0])\n # put any params back\n names[-1] += possible_open_bracket + params\n return names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML_LogXML.finalize.if_reporter_is_not_None_.reporter_finalize_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML_LogXML.finalize.if_reporter_is_not_None_.reporter_finalize_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 426, "end_line": 461, "span_ids": ["LogXML", "LogXML.finalize"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n def __init__(\n self,\n logfile,\n prefix,\n suite_name=\"pytest\",\n logging=\"no\",\n report_duration=\"total\",\n family=\"xunit1\",\n ):\n logfile = os.path.expanduser(os.path.expandvars(logfile))\n self.logfile = os.path.normpath(os.path.abspath(logfile))\n self.prefix = prefix\n self.suite_name = suite_name\n self.logging = logging\n self.report_duration = report_duration\n self.family = family\n self.stats = dict.fromkeys([\"error\", \"passed\", \"failure\", \"skipped\"], 0)\n self.node_reporters = {} # nodeid -> _NodeReporter\n self.node_reporters_ordered = []\n self.global_properties = []\n # List of reports that failed on call but teardown is pending.\n self.open_reports = []\n self.cnt_double_fail_tests = 0\n\n # Replaces convenience family with real family\n if self.family == \"legacy\":\n self.family = \"xunit1\"\n\n def finalize(self, report):\n nodeid = getattr(report, \"nodeid\", report)\n # local hack to handle xdist report order\n slavenode = getattr(report, \"node\", None)\n reporter = self.node_reporters.pop((nodeid, slavenode))\n if reporter is not None:\n reporter.finalize()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.node_reporter_LogXML._opentestcase.return.reporter": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.node_reporter_LogXML._opentestcase.return.reporter", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 503, "end_line": 528, "span_ids": ["LogXML.add_stats", "LogXML._opentestcase", "LogXML.node_reporter"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n\n def node_reporter(self, report):\n nodeid = getattr(report, \"nodeid\", report)\n # local hack to handle xdist report order\n slavenode = getattr(report, \"node\", None)\n\n key = nodeid, slavenode\n\n if key in self.node_reporters:\n # TODO: breasks for --dist=each\n return self.node_reporters[key]\n\n reporter = _NodeReporter(nodeid, self)\n\n self.node_reporters[key] = reporter\n self.node_reporters_ordered.append(reporter)\n\n return reporter\n\n def add_stats(self, key):\n if key in self.stats:\n self.stats[key] += 1\n\n def _opentestcase(self, report):\n reporter = self.node_reporter(report)\n reporter.record_testreport(report)\n return reporter", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_runtest_logreport_LogXML.pytest_runtest_logreport.if_report_when_teardo.if_close_report_.self_open_reports_remove_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_runtest_logreport_LogXML.pytest_runtest_logreport.if_report_when_teardo.if_close_report_.self_open_reports_remove_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 530, "end_line": 614, "span_ids": ["LogXML.pytest_runtest_logreport"], "tokens": 626}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n\n def pytest_runtest_logreport(self, report):\n \"\"\"handle a setup/call/teardown report, generating the appropriate\n xml tags as necessary.\n\n note: due to plugins like xdist, this hook may be called in interlaced\n order with reports from other nodes. 
for example:\n\n usual call order:\n -> setup node1\n -> call node1\n -> teardown node1\n -> setup node2\n -> call node2\n -> teardown node2\n\n possible call order in xdist:\n -> setup node1\n -> call node1\n -> setup node2\n -> call node2\n -> teardown node2\n -> teardown node1\n \"\"\"\n close_report = None\n if report.passed:\n if report.when == \"call\": # ignore setup/teardown\n reporter = self._opentestcase(report)\n reporter.append_pass(report)\n elif report.failed:\n if report.when == \"teardown\":\n # The following vars are needed when xdist plugin is used\n report_wid = getattr(report, \"worker_id\", None)\n report_ii = getattr(report, \"item_index\", None)\n close_report = next(\n (\n rep\n for rep in self.open_reports\n if (\n rep.nodeid == report.nodeid\n and getattr(rep, \"item_index\", None) == report_ii\n and getattr(rep, \"worker_id\", None) == report_wid\n )\n ),\n None,\n )\n if close_report:\n # We need to open new testcase in case we have failure in\n # call and error in teardown in order to follow junit\n # schema\n self.finalize(close_report)\n self.cnt_double_fail_tests += 1\n reporter = self._opentestcase(report)\n if report.when == \"call\":\n reporter.append_failure(report)\n self.open_reports.append(report)\n else:\n reporter.append_error(report)\n elif report.skipped:\n reporter = self._opentestcase(report)\n reporter.append_skipped(report)\n self.update_testcase_duration(report)\n if report.when == \"teardown\":\n reporter = self._opentestcase(report)\n reporter.write_captured_output(report)\n\n for propname, propvalue in report.user_properties:\n reporter.add_property(propname, propvalue)\n\n self.finalize(report)\n report_wid = getattr(report, \"worker_id\", None)\n report_ii = getattr(report, \"item_index\", None)\n close_report = next(\n (\n rep\n for rep in self.open_reports\n if (\n rep.nodeid == report.nodeid\n and getattr(rep, \"item_index\", None) == report_ii\n and getattr(rep, \"worker_id\", None) == report_wid\n )\n ),\n None,\n )\n if close_report:\n self.open_reports.remove(close_report)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.update_testcase_duration_LogXML.pytest_sessionstart.self.suite_start_time.time_time_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.update_testcase_duration_LogXML.pytest_sessionstart.self.suite_start_time.time_time_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 616, "end_line": 638, "span_ids": ["LogXML.pytest_internalerror", "LogXML.pytest_collectreport", "LogXML.update_testcase_duration", "LogXML.pytest_sessionstart"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n\n def update_testcase_duration(self, report):\n \"\"\"accumulates total duration for nodeid from given report and updates\n the Junit.testcase with the new total if already created.\n \"\"\"\n if self.report_duration == \"total\" or report.when == 
self.report_duration:\n reporter = self.node_reporter(report)\n reporter.duration += getattr(report, \"duration\", 0.0)\n\n def pytest_collectreport(self, report):\n if not report.passed:\n reporter = self._opentestcase(report)\n if report.failed:\n reporter.append_collect_error(report)\n else:\n reporter.append_collect_skipped(report)\n\n def pytest_internalerror(self, excrepr):\n reporter = self.node_reporter(\"internal\")\n reporter.attrs.update(classname=\"pytest\", name=\"internal\")\n reporter._add_simple(Junit.error, \"internal error\", excrepr)\n\n def pytest_sessionstart(self):\n self.suite_start_time = time.time()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_sessionfinish_LogXML.pytest_sessionfinish.logfile_close_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_sessionfinish_LogXML.pytest_sessionfinish.logfile_close_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 640, "end_line": 669, "span_ids": ["LogXML.pytest_sessionfinish"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n\n def pytest_sessionfinish(self):\n dirname = os.path.dirname(os.path.abspath(self.logfile))\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n logfile = open(self.logfile, \"w\", encoding=\"utf-8\")\n suite_stop_time = time.time()\n suite_time_delta = suite_stop_time - self.suite_start_time\n\n numtests = (\n self.stats[\"passed\"]\n + self.stats[\"failure\"]\n + self.stats[\"skipped\"]\n + self.stats[\"error\"]\n - self.cnt_double_fail_tests\n )\n logfile.write('')\n\n logfile.write(\n Junit.testsuite(\n self._get_global_properties_node(),\n [x.to_xml() for x in self.node_reporters_ordered],\n name=self.suite_name,\n errors=self.stats[\"error\"],\n failures=self.stats[\"failure\"],\n skipped=self.stats[\"skipped\"],\n tests=numtests,\n time=\"%.3f\" % suite_time_delta,\n ).unicode(indent=0)\n )\n logfile.close()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_terminal_summary_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_LogXML.pytest_terminal_summary_", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 631, "end_line": 648, "span_ids": ["LogXML._get_global_properties_node", "LogXML.pytest_terminal_summary", "LogXML.add_global_property"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogXML(object):\n\n def pytest_terminal_summary(self, terminalreporter):\n terminalreporter.write_sep(\"-\", \"generated xml file: %s\" % (self.logfile))\n\n def add_global_property(self, name, value):\n self.global_properties.append((str(name), bin_xml_escape(value)))\n\n def _get_global_properties_node(self):\n \"\"\"Return a Junit node containing custom properties, if any.\n \"\"\"\n if self.global_properties:\n return Junit.properties(\n [\n Junit.property(name=name, value=value)\n for name, value in self.global_properties\n ]\n )\n return \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__Access_and_control_lo_DEFAULT_LOG_DATE_FORMAT._H_M_S_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__Access_and_control_lo_DEFAULT_LOG_DATE_FORMAT._H_M_S_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 19, "span_ids": ["imports", "docstring", "impl"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" Access and control log capturing. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport re\nfrom contextlib import contextmanager\n\nimport py\nimport six\n\nimport pytest\nfrom _pytest.compat import dummy_context_manager\nfrom _pytest.config import create_terminal_writer\nfrom _pytest.pathlib import Path\n\nDEFAULT_LOG_FORMAT = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\nDEFAULT_LOG_DATE_FORMAT = \"%H:%M:%S\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_ColoredLevelFormatter_get_option_ini.for_name_in_names_.if_ret_.return.ret": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_ColoredLevelFormatter_get_option_ini.for_name_in_names_.if_ret_.return.ret", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 22, "end_line": 80, "span_ids": ["get_option_ini", "ColoredLevelFormatter", "ColoredLevelFormatter.format"], "tokens": 468}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ColoredLevelFormatter(logging.Formatter):\n \"\"\"\n Colorize the %(levelname)..s part of the log format passed to __init__.\n \"\"\"\n\n LOGLEVEL_COLOROPTS = {\n logging.CRITICAL: 
{\"red\"},\n logging.ERROR: {\"red\", \"bold\"},\n logging.WARNING: {\"yellow\"},\n logging.WARN: {\"yellow\"},\n logging.INFO: {\"green\"},\n logging.DEBUG: {\"purple\"},\n logging.NOTSET: set(),\n }\n LEVELNAME_FMT_REGEX = re.compile(r\"%\\(levelname\\)([+-]?\\d*s)\")\n\n def __init__(self, terminalwriter, *args, **kwargs):\n super(ColoredLevelFormatter, self).__init__(*args, **kwargs)\n if six.PY2:\n self._original_fmt = self._fmt\n else:\n self._original_fmt = self._style._fmt\n self._level_to_fmt_mapping = {}\n\n levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)\n if not levelname_fmt_match:\n return\n levelname_fmt = levelname_fmt_match.group()\n\n for level, color_opts in self.LOGLEVEL_COLOROPTS.items():\n formatted_levelname = levelname_fmt % {\n \"levelname\": logging.getLevelName(level)\n }\n\n # add ANSI escape sequences around the formatted levelname\n color_kwargs = {name: True for name in color_opts}\n colorized_formatted_levelname = terminalwriter.markup(\n formatted_levelname, **color_kwargs\n )\n self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(\n colorized_formatted_levelname, self._fmt\n )\n\n def format(self, record):\n fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)\n if six.PY2:\n self._fmt = fmt\n else:\n self._style._fmt = fmt\n return super(ColoredLevelFormatter, self).format(record)\n\n\ndef get_option_ini(config, *names):\n for name in names:\n ret = config.getoption(name) # 'default' arg won't work as expected\n if ret is None:\n ret = config.getini(name)\n if ret:\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_pytest_addoption_pytest_addoption.None_11": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_pytest_addoption_pytest_addoption.None_11", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 83, "end_line": 164, "span_ids": ["pytest_addoption"], "tokens": 534}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n \"\"\"Add options to control log capturing.\"\"\"\n group = parser.getgroup(\"logging\")\n\n def add_option_ini(option, dest, default=None, type=None, **kwargs):\n parser.addini(\n dest, default=default, type=type, help=\"default value for \" + option\n )\n group.addoption(option, dest=dest, **kwargs)\n\n add_option_ini(\n \"--no-print-logs\",\n dest=\"log_print\",\n action=\"store_const\",\n const=False,\n default=True,\n type=\"bool\",\n help=\"disable printing caught logs on failed tests.\",\n )\n add_option_ini(\n \"--log-level\",\n dest=\"log_level\",\n default=None,\n help=\"logging level used by the logging module\",\n )\n add_option_ini(\n \"--log-format\",\n dest=\"log_format\",\n default=DEFAULT_LOG_FORMAT,\n help=\"log format as used by the logging module.\",\n )\n add_option_ini(\n \"--log-date-format\",\n dest=\"log_date_format\",\n default=DEFAULT_LOG_DATE_FORMAT,\n help=\"log date format as used by the logging 
module.\",\n )\n parser.addini(\n \"log_cli\",\n default=False,\n type=\"bool\",\n help='enable log display during test run (also known as \"live logging\").',\n )\n add_option_ini(\n \"--log-cli-level\", dest=\"log_cli_level\", default=None, help=\"cli logging level.\"\n )\n add_option_ini(\n \"--log-cli-format\",\n dest=\"log_cli_format\",\n default=None,\n help=\"log format as used by the logging module.\",\n )\n add_option_ini(\n \"--log-cli-date-format\",\n dest=\"log_cli_date_format\",\n default=None,\n help=\"log date format as used by the logging module.\",\n )\n add_option_ini(\n \"--log-file\",\n dest=\"log_file\",\n default=None,\n help=\"path to a file when logging will be written to.\",\n )\n add_option_ini(\n \"--log-file-level\",\n dest=\"log_file_level\",\n default=None,\n help=\"log file logging level.\",\n )\n add_option_ini(\n \"--log-file-format\",\n dest=\"log_file_format\",\n default=DEFAULT_LOG_FORMAT,\n help=\"log format as used by the logging module.\",\n )\n add_option_ini(\n \"--log-file-date-format\",\n dest=\"log_file_date_format\",\n default=DEFAULT_LOG_DATE_FORMAT,\n help=\"log date format as used by the logging module.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_catching_logs_catching_logs.try_.finally_.if_add_new_handler_.root_logger_removeHandler": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_catching_logs_catching_logs.try_.finally_.if_add_new_handler_.root_logger_removeHandler", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 167, "end_line": 192, "span_ids": ["catching_logs"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef catching_logs(handler, formatter=None, level=None):\n \"\"\"Context manager that prepares the whole logging machinery properly.\"\"\"\n root_logger = logging.getLogger()\n\n if formatter is not None:\n handler.setFormatter(formatter)\n if level is not None:\n handler.setLevel(level)\n\n # Adding the same handler twice would confuse logging system.\n # Just don't do that.\n add_new_handler = handler not in root_logger.handlers\n\n if add_new_handler:\n root_logger.addHandler(handler)\n if level is not None:\n orig_level = root_logger.level\n root_logger.setLevel(min(orig_level, level))\n try:\n yield handler\n finally:\n if level is not None:\n root_logger.setLevel(orig_level)\n if add_new_handler:\n root_logger.removeHandler(handler)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureHandler_LogCaptureHandler.reset.self.stream.py_io_TextIO_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureHandler_LogCaptureHandler.reset.self.stream.py_io_TextIO_", "embedding": null, "metadata": {"file_path": 
"src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 195, "end_line": 210, "span_ids": ["LogCaptureHandler.emit", "LogCaptureHandler", "LogCaptureHandler.reset"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureHandler(logging.StreamHandler):\n \"\"\"A logging handler that stores log records and the log text.\"\"\"\n\n def __init__(self):\n \"\"\"Creates a new log handler.\"\"\"\n logging.StreamHandler.__init__(self, py.io.TextIO())\n self.records = []\n\n def emit(self, record):\n \"\"\"Keep the log records in a list in addition to the log text.\"\"\"\n self.records.append(record)\n logging.StreamHandler.emit(self, record)\n\n def reset(self):\n self.records = []\n self.stream = py.io.TextIO()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture_LogCaptureFixture.handler.return.self__item_catch_log_hand": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture_LogCaptureFixture.handler.return.self__item_catch_log_hand", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 213, "end_line": 237, "span_ids": ["LogCaptureFixture.handler", "LogCaptureFixture._finalize", "LogCaptureFixture"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n \"\"\"Provides access and control of log capturing.\"\"\"\n\n def __init__(self, item):\n \"\"\"Creates a new funcarg.\"\"\"\n self._item = item\n # dict of log name -> log level\n self._initial_log_levels = {} # Dict[str, int]\n\n def _finalize(self):\n \"\"\"Finalizes the fixture.\n\n This restores the log levels changed by :meth:`set_level`.\n \"\"\"\n # restore log levels\n for logger_name, level in self._initial_log_levels.items():\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n\n @property\n def handler(self):\n \"\"\"\n :rtype: LogCaptureHandler\n \"\"\"\n return self._item.catch_log_handler", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.get_records_LogCaptureFixture.get_records.if_handler_.else_.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.get_records_LogCaptureFixture.get_records.if_handler_.else_.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 239, "end_line": 255, "span_ids": ["LogCaptureFixture.get_records"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n\n def get_records(self, when):\n \"\"\"\n Get the logging records for one of the possible test phases.\n\n :param str when:\n Which test phase to obtain the records from. Valid values are: \"setup\", \"call\" and \"teardown\".\n\n :rtype: List[logging.LogRecord]\n :return: the list of captured records at the given stage\n\n .. versionadded:: 3.4\n \"\"\"\n handler = self._item.catch_log_handlers.get(when)\n if handler:\n return handler.records\n else:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.text_LogCaptureFixture.record_tuples.return._r_name_r_levelno_r_ge": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.text_LogCaptureFixture.record_tuples.return._r_name_r_levelno_r_ge", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 257, "end_line": 276, "span_ids": ["LogCaptureFixture.records", "LogCaptureFixture.text", "LogCaptureFixture.record_tuples"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n\n @property\n def text(self):\n \"\"\"Returns the log text.\"\"\"\n return self.handler.stream.getvalue()\n\n @property\n def records(self):\n \"\"\"Returns the list of log records.\"\"\"\n return self.handler.records\n\n @property\n def record_tuples(self):\n \"\"\"Returns a list of a stripped down version of log records intended\n for use in assertion comparison.\n\n The format of the tuple is:\n\n (logger_name, log_level, message)\n \"\"\"\n return [(r.name, r.levelno, r.getMessage()) for r in self.records]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.messages_LogCaptureFixture.messages.return._r_getMessage_for_r_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.messages_LogCaptureFixture.messages.return._r_getMessage_for_r_in_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 292, "span_ids": ["LogCaptureFixture.messages"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n\n @property\n def messages(self):\n \"\"\"Returns a list of format-interpolated log messages.\n\n Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list\n are all interpolated.\n Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with\n levels, timestamps, etc, making exact comparisons more reliable.\n\n Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments\n to the logging functions) is not included, as this is added by the formatter in the handler.\n\n .. versionadded:: 3.7\n \"\"\"\n return [r.getMessage() for r in self.records]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.clear_LogCaptureFixture.set_level.logger_setLevel_level_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.clear_LogCaptureFixture.set_level.logger_setLevel_level_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 294, "end_line": 313, "span_ids": ["LogCaptureFixture.clear", "LogCaptureFixture.set_level"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n\n def clear(self):\n \"\"\"Reset the list of log records and the captured log text.\"\"\"\n self.handler.reset()\n\n def set_level(self, level, logger=None):\n \"\"\"Sets the level for capturing of logs. The level will be restored to its previous value at the end of\n the test.\n\n :param int level: the logger to level.\n :param str logger: the logger to update the level. If not given, the root logger level is updated.\n\n .. 
versionchanged:: 3.4\n The levels of the loggers changed by this function will be restored to their initial values at the\n end of the test.\n \"\"\"\n logger_name = logger\n logger = logging.getLogger(logger_name)\n # save the original log-level to restore it during teardown\n self._initial_log_levels.setdefault(logger_name, logger.level)\n logger.setLevel(level)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.at_level_LogCaptureFixture.at_level.try_.finally_.logger_setLevel_orig_leve": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LogCaptureFixture.at_level_LogCaptureFixture.at_level.try_.finally_.logger_setLevel_orig_leve", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 315, "end_line": 329, "span_ids": ["LogCaptureFixture.at_level"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LogCaptureFixture(object):\n\n @contextmanager\n def at_level(self, level, logger=None):\n \"\"\"Context manager that sets the level for capturing of logs. After the end of the 'with' statement the\n level is restored to its original value.\n\n :param int level: the logger to level.\n :param str logger: the logger to update the level. 
If not given, the root logger level is updated.\n \"\"\"\n logger = logging.getLogger(logger)\n orig_level = logger.level\n logger.setLevel(level)\n try:\n yield\n finally:\n logger.setLevel(orig_level)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_caplog_caplog.result__finalize_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_caplog_caplog.result__finalize_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 345, "span_ids": ["caplog"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef caplog(request):\n \"\"\"Access and control log capturing.\n\n Captured logs are available through the following properties/methods::\n\n * caplog.text -> string containing formatted log output\n * caplog.records -> list of logging.LogRecord instances\n * caplog.record_tuples -> list of (logger_name, level, message) tuples\n * caplog.clear() -> clear captured records and formatted log output string\n \"\"\"\n result = LogCaptureFixture(request.node)\n yield result\n result._finalize()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_get_actual_log_level_pytest_configure.config_pluginmanager_regi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_get_actual_log_level_pytest_configure.config_pluginmanager_regi", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 348, "end_line": 376, "span_ids": ["get_actual_log_level", "pytest_configure"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_actual_log_level(config, *setting_names):\n \"\"\"Return the actual logging level.\"\"\"\n\n for setting_name in setting_names:\n log_level = config.getoption(setting_name)\n if log_level is None:\n log_level = config.getini(setting_name)\n if log_level:\n break\n else:\n return\n\n if isinstance(log_level, six.string_types):\n log_level = log_level.upper()\n try:\n return int(getattr(logging, log_level, log_level))\n except ValueError:\n # Python logging does not recognise this as a logging level\n raise pytest.UsageError(\n \"'{}' is not recognized as a logging level name for \"\n \"'{}'. 
Please consider passing the \"\n \"logging level num instead.\".format(log_level, setting_name)\n )\n\n\n# run after terminalreporter/capturemanager are configured\n@pytest.hookimpl(trylast=True)\ndef pytest_configure(config):\n config.pluginmanager.register(LoggingPlugin(config), \"logging-plugin\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin_LoggingPlugin.__init__.None_2.self__setup_cli_logging_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin_LoggingPlugin.__init__.None_2.self__setup_cli_logging_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 379, "end_line": 428, "span_ids": ["LoggingPlugin"], "tokens": 405}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n \"\"\"Attaches to the logging module and captures log messages for each test.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Creates a new plugin to capture log messages.\n\n The formatter can be safely shared across all handlers so\n create a single one for the entire test session here.\n \"\"\"\n self._config = config\n\n # enable verbose output automatically if live logging is enabled\n if self._log_cli_enabled() and config.getoption(\"verbose\") < 1:\n config.option.verbose = 1\n\n self.print_logs = get_option_ini(config, \"log_print\")\n self.formatter = logging.Formatter(\n get_option_ini(config, \"log_format\"),\n get_option_ini(config, \"log_date_format\"),\n )\n self.log_level = get_actual_log_level(config, \"log_level\")\n\n self.log_file_level = get_actual_log_level(config, \"log_file_level\")\n self.log_file_format = get_option_ini(config, \"log_file_format\", \"log_format\")\n self.log_file_date_format = get_option_ini(\n config, \"log_file_date_format\", \"log_date_format\"\n )\n self.log_file_formatter = logging.Formatter(\n self.log_file_format, datefmt=self.log_file_date_format\n )\n\n log_file = get_option_ini(config, \"log_file\")\n if log_file:\n self.log_file_handler = logging.FileHandler(\n log_file, mode=\"w\", encoding=\"UTF-8\"\n )\n self.log_file_handler.setFormatter(self.log_file_formatter)\n else:\n self.log_file_handler = None\n\n self.log_cli_handler = None\n\n self.live_logs_context = lambda: dummy_context_manager()\n # Note that the lambda for the live_logs_context is needed because\n # live_logs_context can otherwise not be entered multiple times due\n # to limitations of contextlib.contextmanager.\n\n if self._log_cli_enabled():\n self._setup_cli_logging()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._setup_cli_logging_LoggingPlugin._setup_cli_logging.self.live_logs_context.lambda_catching_logs_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._setup_cli_logging_LoggingPlugin._setup_cli_logging.self.live_logs_context.lambda_catching_logs_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 430, "end_line": 461, "span_ids": ["LoggingPlugin._setup_cli_logging"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n def _setup_cli_logging(self):\n config = self._config\n terminal_reporter = config.pluginmanager.get_plugin(\"terminalreporter\")\n if terminal_reporter is None:\n # terminal reporter is disabled e.g. by pytest-xdist.\n return\n\n capture_manager = config.pluginmanager.get_plugin(\"capturemanager\")\n # if capturemanager plugin is disabled, live logging still works.\n log_cli_handler = _LiveLoggingStreamHandler(terminal_reporter, capture_manager)\n log_cli_format = get_option_ini(config, \"log_cli_format\", \"log_format\")\n log_cli_date_format = get_option_ini(\n config, \"log_cli_date_format\", \"log_date_format\"\n )\n if (\n config.option.color != \"no\"\n and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)\n ):\n log_cli_formatter = ColoredLevelFormatter(\n create_terminal_writer(config),\n log_cli_format,\n datefmt=log_cli_date_format,\n )\n else:\n log_cli_formatter = logging.Formatter(\n log_cli_format, datefmt=log_cli_date_format\n )\n log_cli_level = get_actual_log_level(config, \"log_cli_level\", \"log_level\")\n self.log_cli_handler = log_cli_handler\n self.live_logs_context = lambda: catching_logs(\n log_cli_handler, formatter=log_cli_formatter, level=log_cli_level\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.set_log_path_LoggingPlugin.set_log_path.self_log_file_handler_set": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.set_log_path_LoggingPlugin.set_log_path.self_log_file_handler_set", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 463, "end_line": 482, "span_ids": ["LoggingPlugin.set_log_path"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n def set_log_path(self, fname):\n \"\"\"Public method, which can set filename parameter for\n Logging.FileHandler(). Also creates parent directory if\n it does not exist.\n\n .. 
warning::\n Please considered as an experimental API.\n \"\"\"\n fname = Path(fname)\n\n if not fname.is_absolute():\n fname = Path(self._config.rootdir, fname)\n\n if not fname.parent.exists():\n fname.parent.mkdir(exist_ok=True, parents=True)\n\n self.log_file_handler = logging.FileHandler(\n str(fname), mode=\"w\", encoding=\"UTF-8\"\n )\n self.log_file_handler.setFormatter(self.log_file_formatter)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._log_cli_enabled_LoggingPlugin._runtest_for.with_self__runtest_for_ma.if_self_log_file_handler_.else_.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._log_cli_enabled_LoggingPlugin._runtest_for.with_self__runtest_for_ma.if_self_log_file_handler_.else_.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 484, "end_line": 511, "span_ids": ["LoggingPlugin._log_cli_enabled", "LoggingPlugin._runtest_for", "LoggingPlugin.pytest_collection"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n def _log_cli_enabled(self):\n \"\"\"Return True if log_cli should be considered enabled, either explicitly\n or because --log-cli-level was given in the command-line.\n \"\"\"\n return self._config.getoption(\n \"--log-cli-level\"\n ) is not None or self._config.getini(\"log_cli\")\n\n @pytest.hookimpl(hookwrapper=True, tryfirst=True)\n def pytest_collection(self):\n with self.live_logs_context():\n if self.log_cli_handler:\n self.log_cli_handler.set_when(\"collection\")\n\n if self.log_file_handler is not None:\n with catching_logs(self.log_file_handler, level=self.log_file_level):\n yield\n else:\n yield\n\n @contextmanager\n def _runtest_for(self, item, when):\n with self._runtest_for_main(item, when):\n if self.log_file_handler is not None:\n with catching_logs(self.log_file_handler, level=self.log_file_level):\n yield\n else:\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._runtest_for_main_LoggingPlugin._runtest_for_main.with_catching_logs_.if_self_print_logs_.item_add_report_section_w": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin._runtest_for_main_LoggingPlugin._runtest_for_main.with_catching_logs_.if_self_print_logs_.item_add_report_section_w", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 540, "span_ids": ["LoggingPlugin._runtest_for_main"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n @contextmanager\n def _runtest_for_main(self, item, when):\n \"\"\"Implements the internals of pytest_runtest_xxx() hook.\"\"\"\n with catching_logs(\n LogCaptureHandler(), formatter=self.formatter, level=self.log_level\n ) as log_handler:\n if self.log_cli_handler:\n self.log_cli_handler.set_when(when)\n\n if item is None:\n yield # run the test\n return\n\n if not hasattr(item, \"catch_log_handlers\"):\n item.catch_log_handlers = {}\n item.catch_log_handlers[when] = log_handler\n item.catch_log_handler = log_handler\n try:\n yield # run test\n finally:\n if when == \"teardown\":\n del item.catch_log_handler\n del item.catch_log_handlers\n\n if self.print_logs:\n # Add a captured log section to the report.\n log = log_handler.stream.getvalue().strip()\n item.add_report_section(when, \"log\", log)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_runtest_setup_LoggingPlugin.pytest_runtest_logreport.with_self__runtest_for_No.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_runtest_setup_LoggingPlugin.pytest_runtest_logreport.with_self__runtest_for_No.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 542, "end_line": 572, "span_ids": ["LoggingPlugin.pytest_runtest_teardown", "LoggingPlugin.pytest_runtest_logstart", "LoggingPlugin.pytest_runtest_setup", "LoggingPlugin.pytest_runtest_logfinish", "LoggingPlugin.pytest_runtest_call", "LoggingPlugin.pytest_runtest_logreport"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_setup(self, item):\n with self._runtest_for(item, \"setup\"):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_call(self, item):\n with self._runtest_for(item, \"call\"):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_teardown(self, item):\n with self._runtest_for(item, \"teardown\"):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_logstart(self):\n if self.log_cli_handler:\n self.log_cli_handler.reset()\n with self._runtest_for(None, \"start\"):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_logfinish(self):\n with self._runtest_for(None, \"finish\"):\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_logreport(self):\n with self._runtest_for(None, \"logreport\"):\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionfinish_LoggingPlugin.pytest_sessionfinish.with_self_live_logs_conte.if_self_log_file_handler_.else_.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionfinish_LoggingPlugin.pytest_sessionfinish.with_self_live_logs_conte.if_self_log_file_handler_.else_.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 574, "end_line": 590, "span_ids": ["LoggingPlugin.pytest_sessionfinish"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n @pytest.hookimpl(hookwrapper=True, tryfirst=True)\n def pytest_sessionfinish(self):\n with self.live_logs_context():\n if self.log_cli_handler:\n self.log_cli_handler.set_when(\"sessionfinish\")\n if self.log_file_handler is not None:\n try:\n with catching_logs(\n self.log_file_handler, level=self.log_file_level\n ):\n yield\n finally:\n # Close the FileHandler explicitly.\n # (logging.shutdown might have lost the weakref?!)\n self.log_file_handler.close()\n else:\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionstart_LoggingPlugin.pytest_runtestloop.with_self_live_logs_conte.if_self_log_file_handler_.else_._run_all_the_tests": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py_LoggingPlugin.pytest_sessionstart_LoggingPlugin.pytest_runtestloop.with_self_live_logs_conte.if_self_log_file_handler_.else_._run_all_the_tests", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 592, "end_line": 611, "span_ids": ["LoggingPlugin.pytest_runtestloop", "LoggingPlugin.pytest_sessionstart"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LoggingPlugin(object):\n\n @pytest.hookimpl(hookwrapper=True, tryfirst=True)\n def pytest_sessionstart(self):\n with self.live_logs_context():\n if self.log_cli_handler:\n self.log_cli_handler.set_when(\"sessionstart\")\n if self.log_file_handler is not None:\n with catching_logs(self.log_file_handler, level=self.log_file_level):\n yield\n else:\n yield\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtestloop(self, session):\n \"\"\"Runs all collected test items.\"\"\"\n with self.live_logs_context():\n if self.log_file_handler is not None:\n with catching_logs(self.log_file_handler, level=self.log_file_level):\n yield # run all the tests\n else:\n yield # run all the tests", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler__LiveLoggingStreamHandler.set_when.if_when_start_.self._test_outcome_written.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler__LiveLoggingStreamHandler.set_when.if_when_start_.self._test_outcome_written.False", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 614, "end_line": 643, "span_ids": ["_LiveLoggingStreamHandler", "_LiveLoggingStreamHandler.reset", "_LiveLoggingStreamHandler.set_when"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LiveLoggingStreamHandler(logging.StreamHandler):\n \"\"\"\n Custom StreamHandler used by the live logging feature: it will write a newline before the first log message\n in each test.\n\n During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured\n and won't appear in the terminal.\n \"\"\"\n\n def __init__(self, terminal_reporter, capture_manager):\n \"\"\"\n :param _pytest.terminal.TerminalReporter terminal_reporter:\n :param _pytest.capture.CaptureManager capture_manager:\n \"\"\"\n logging.StreamHandler.__init__(self, stream=terminal_reporter)\n self.capture_manager = capture_manager\n self.reset()\n self.set_when(None)\n self._test_outcome_written = False\n\n def reset(self):\n \"\"\"Reset the handler; should be called before the start of each test\"\"\"\n self._first_record_emitted = False\n\n def set_when(self, when):\n \"\"\"Prepares for the given test phase (setup/call/teardown)\"\"\"\n self._when = when\n self._section_name_shown = False\n if when == \"start\":\n self._test_outcome_written = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler.emit_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/logging.py__LiveLoggingStreamHandler.emit_", "embedding": null, "metadata": {"file_path": "src/_pytest/logging.py", "file_name": "logging.py", "file_type": "text/x-python", "category": "implementation", "start_line": 645, "end_line": 663, "span_ids": ["_LiveLoggingStreamHandler.emit"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _LiveLoggingStreamHandler(logging.StreamHandler):\n\n def emit(self, record):\n ctx_manager = (\n self.capture_manager.global_and_fixture_disabled()\n if self.capture_manager\n else dummy_context_manager()\n )\n with ctx_manager:\n if not self._first_record_emitted:\n self.stream.write(\"\\n\")\n self._first_record_emitted = True\n 
elif self._when in (\"teardown\", \"finish\"):\n if not self._test_outcome_written:\n self._test_outcome_written = True\n self.stream.write(\"\\n\")\n if not self._section_name_shown and self._when:\n self.stream.section(\"live log \" + self._when, sep=\"-\", bold=True)\n self._section_name_shown = True\n logging.StreamHandler.emit(self, record)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__core_implementation_o_EXIT_NOTESTSCOLLECTED.5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__core_implementation_o_EXIT_NOTESTSCOLLECTED.5", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 33, "span_ids": ["imports", "docstring", "impl"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" core implementation of testing process: init, session, runtest loop. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport fnmatch\nimport functools\nimport os\nimport pkgutil\nimport sys\nimport warnings\n\nimport attr\nimport py\nimport six\n\nimport _pytest._code\nfrom _pytest import nodes\nfrom _pytest.config import directory_arg\nfrom _pytest.config import hookimpl\nfrom _pytest.config import UsageError\nfrom _pytest.deprecated import PYTEST_CONFIG_GLOBAL\nfrom _pytest.outcomes import exit\nfrom _pytest.runner import collect_one_node\n\n# exitcodes for the command line\nEXIT_OK = 0\nEXIT_TESTSFAILED = 1\nEXIT_INTERRUPTED = 2\nEXIT_INTERNALERROR = 3\nEXIT_USAGEERROR = 4\nEXIT_NOTESTSCOLLECTED = 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__ConfigDeprecated_pytest_configure._compatibility": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__ConfigDeprecated_pytest_configure._compatibility", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 174, "end_line": 191, "span_ids": ["_ConfigDeprecated", "_ConfigDeprecated.__getattr__", "pytest_configure", "_ConfigDeprecated.__repr__", "_ConfigDeprecated.__setattr__"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class _ConfigDeprecated(object):\n def __init__(self, config):\n self.__dict__[\"_config\"] = config\n\n def __getattr__(self, attr):\n warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)\n return getattr(self._config, attr)\n\n def __setattr__(self, attr, val):\n 
warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2)\n return setattr(self._config, attr, val)\n\n def __repr__(self):\n return \"{}({!r})\".format(type(self).__name__, self._config)\n\n\ndef pytest_configure(config):\n __import__(\"pytest\").config = _ConfigDeprecated(config) # compatibility", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_wrap_session_wrap_session.return.session_exitstatus": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_wrap_session_wrap_session.return.session_exitstatus", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 194, "end_line": 238, "span_ids": ["wrap_session"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def wrap_session(config, doit):\n \"\"\"Skeleton command line program\"\"\"\n session = Session(config)\n session.exitstatus = EXIT_OK\n initstate = 0\n try:\n try:\n config._do_configure()\n initstate = 1\n config.hook.pytest_sessionstart(session=session)\n initstate = 2\n session.exitstatus = doit(config, session) or 0\n except UsageError:\n session.exitstatus = EXIT_USAGEERROR\n raise\n except Failed:\n session.exitstatus = EXIT_TESTSFAILED\n except (KeyboardInterrupt, exit.Exception):\n excinfo = _pytest._code.ExceptionInfo.from_current()\n exitstatus = EXIT_INTERRUPTED\n if isinstance(excinfo.value, exit.Exception):\n if excinfo.value.returncode is not None:\n exitstatus = excinfo.value.returncode\n if initstate < 2:\n sys.stderr.write(\n \"{}: {}\\n\".format(excinfo.typename, excinfo.value.msg)\n )\n config.hook.pytest_keyboard_interrupt(excinfo=excinfo)\n session.exitstatus = exitstatus\n except: # noqa\n excinfo = _pytest._code.ExceptionInfo.from_current()\n config.notify_exception(excinfo, config.option)\n session.exitstatus = EXIT_INTERNALERROR\n if excinfo.errisinstance(SystemExit):\n sys.stderr.write(\"mainloop: caught unexpected SystemExit!\\n\")\n\n finally:\n excinfo = None # Explicitly break reference cycle.\n session.startdir.chdir()\n if initstate >= 2:\n config.hook.pytest_sessionfinish(\n session=session, exitstatus=session.exitstatus\n )\n config._ensure_unconfigure()\n return session.exitstatus", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_cmdline_main_pytest_runtestloop.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_cmdline_main_pytest_runtestloop.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 275, "span_ids": ["pytest_cmdline_main", "pytest_collection", "pytest_runtestloop", "_main"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
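_ConfigDeprecated above is a thin deprecation proxy: every attribute read or write warns and then forwards to the wrapped config. The same idiom in isolation, with a hypothetical DeprecatedAlias name and message of your own:

import warnings


class DeprecatedAlias(object):
    def __init__(self, wrapped, message):
        # Bypass __setattr__ so construction itself does not warn.
        self.__dict__["_wrapped"] = wrapped
        self.__dict__["_message"] = message

    def __getattr__(self, name):
        warnings.warn(self._message, DeprecationWarning, stacklevel=2)
        return getattr(self._wrapped, name)

    def __setattr__(self, name, value):
        warnings.warn(self._message, DeprecationWarning, stacklevel=2)
        setattr(self._wrapped, name, value)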
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_cmdline_main(config):\n return wrap_session(config, _main)\n\n\ndef _main(config, session):\n \"\"\" default command line protocol for initialization, session,\n running tests and reporting. \"\"\"\n config.hook.pytest_collection(session=session)\n config.hook.pytest_runtestloop(session=session)\n\n if session.testsfailed:\n return EXIT_TESTSFAILED\n elif session.testscollected == 0:\n return EXIT_NOTESTSCOLLECTED\n\n\ndef pytest_collection(session):\n return session.perform_collect()\n\n\ndef pytest_runtestloop(session):\n if session.testsfailed and not session.config.option.continue_on_collection_errors:\n raise session.Interrupted(\"%d errors during collection\" % session.testsfailed)\n\n if session.config.option.collectonly:\n return True\n\n for i, item in enumerate(session.items):\n nextitem = session.items[i + 1] if i + 1 < len(session.items) else None\n item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)\n if session.shouldfail:\n raise session.Failed(session.shouldfail)\n if session.shouldstop:\n raise session.Interrupted(session.shouldstop)\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__in_venv__in_venv.return.any_fname_basename_in_ac": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__in_venv__in_venv.return.any_fname_basename_in_ac", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 278, "end_line": 292, "span_ids": ["_in_venv"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _in_venv(path):\n \"\"\"Attempts to detect if ``path`` is the root of a Virtual Environment by\n checking for the existence of the appropriate activate script\"\"\"\n bindir = path.join(\"Scripts\" if sys.platform.startswith(\"win\") else \"bin\")\n if not bindir.isdir():\n return False\n activates = (\n \"activate\",\n \"activate.csh\",\n \"activate.fish\",\n \"Activate\",\n \"Activate.bat\",\n \"Activate.ps1\",\n )\n return any([fname.basename in activates for fname in bindir.listdir()])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_ignore_collect_pytest_collection_modifyitems.if_deselected_.items_remaining": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_ignore_collect_pytest_collection_modifyitems.if_deselected_.items_remaining", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 295, "end_line": 341, "span_ids": 
["pytest_ignore_collect", "pytest_collection_modifyitems"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_ignore_collect(path, config):\n ignore_paths = config._getconftest_pathlist(\"collect_ignore\", path=path.dirpath())\n ignore_paths = ignore_paths or []\n excludeopt = config.getoption(\"ignore\")\n if excludeopt:\n ignore_paths.extend([py.path.local(x) for x in excludeopt])\n\n if py.path.local(path) in ignore_paths:\n return True\n\n ignore_globs = config._getconftest_pathlist(\n \"collect_ignore_glob\", path=path.dirpath()\n )\n ignore_globs = ignore_globs or []\n excludeglobopt = config.getoption(\"ignore_glob\")\n if excludeglobopt:\n ignore_globs.extend([py.path.local(x) for x in excludeglobopt])\n\n if any(\n fnmatch.fnmatch(six.text_type(path), six.text_type(glob))\n for glob in ignore_globs\n ):\n return True\n\n allow_in_venv = config.getoption(\"collect_in_virtualenv\")\n if not allow_in_venv and _in_venv(path):\n return True\n\n return False\n\n\ndef pytest_collection_modifyitems(items, config):\n deselect_prefixes = tuple(config.getoption(\"deselect\") or [])\n if not deselect_prefixes:\n return\n\n remaining = []\n deselected = []\n for colitem in items:\n if colitem.nodeid.startswith(deselect_prefixes):\n deselected.append(colitem)\n else:\n remaining.append(colitem)\n\n if deselected:\n config.hook.pytest_deselected(items=deselected)\n items[:] = remaining", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__patched_find_module__patched_find_module.if_six_PY2_python_3_4.else_.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py__patched_find_module__patched_find_module.if_six_PY2_python_3_4.else_.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 344, "end_line": 381, "span_ids": ["_patched_find_module"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextlib.contextmanager\ndef _patched_find_module():\n \"\"\"Patch bug in pkgutil.ImpImporter.find_module\n\n When using pkgutil.find_loader on python<3.4 it removes symlinks\n from the path due to a call to os.path.realpath. This is not consistent\n with actually doing the import (in these versions, pkgutil and __import__\n did not share the same underlying code). 
This can break conftest\n discovery for pytest where symlinks are involved.\n\n The only supported python<3.4 by pytest is python 2.7.\n \"\"\"\n if six.PY2: # python 3.4+ uses importlib instead\n\n def find_module_patched(self, fullname, path=None):\n # Note: we ignore 'path' argument since it is only used via meta_path\n subname = fullname.split(\".\")[-1]\n if subname != fullname and self.path is None:\n return None\n if self.path is None:\n path = None\n else:\n # original: path = [os.path.realpath(self.path)]\n path = [self.path]\n try:\n file, filename, etc = pkgutil.imp.find_module(subname, path)\n except ImportError:\n return None\n return pkgutil.ImpLoader(fullname, file, filename, etc)\n\n old_find_module = pkgutil.ImpImporter.find_module\n pkgutil.ImpImporter.find_module = find_module_patched\n try:\n yield\n finally:\n pkgutil.ImpImporter.find_module = old_find_module\n else:\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_FSHookProxy__bestrelpath_cache.__missing__.return.r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_FSHookProxy__bestrelpath_cache.__missing__.return.r", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 384, "end_line": 417, "span_ids": ["FSHookProxy.__getattr__", "NoMatch", "Failed", "Interrupted", "FSHookProxy", "_bestrelpath_cache", "_bestrelpath_cache.__missing__"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FSHookProxy(object):\n def __init__(self, fspath, pm, remove_mods):\n self.fspath = fspath\n self.pm = pm\n self.remove_mods = remove_mods\n\n def __getattr__(self, name):\n x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)\n self.__dict__[name] = x\n return x\n\n\nclass NoMatch(Exception):\n \"\"\" raised if matching cannot locate a matching names. \"\"\"\n\n\nclass Interrupted(KeyboardInterrupt):\n \"\"\" signals an interrupted test run. \"\"\"\n\n __module__ = \"builtins\" # for py3\n\n\nclass Failed(Exception):\n \"\"\" signals a stop as failed test run. 
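_patched_find_module above swaps a replacement function into pkgutil for the duration of a with-block and restores the original in a finally clause. The same temporary-monkeypatch idiom in isolation; the patched() helper name and the math.pi example are purely illustrative:

import contextlib
import math


@contextlib.contextmanager
def patched(obj, name, replacement):
    original = getattr(obj, name)
    setattr(obj, name, replacement)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        setattr(obj, name, original)


with patched(math, "pi", 3.0):
    assert math.pi == 3.0
assert math.pi > 3.14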
\"\"\"\n\n\n@attr.s\nclass _bestrelpath_cache(dict):\n path = attr.ib()\n\n def __missing__(self, path):\n r = self.path.bestrelpath(path)\n self[path] = r\n return r", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session_Session.isinitpath.return.path_in_self__initialpath": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session_Session.isinitpath.return.path_in_self__initialpath", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 420, "end_line": 475, "span_ids": ["Session.pytest_runtest_logreport", "Session.__repr__", "Session", "Session._node_location_to_relpath", "Session.pytest_collectstart", "Session.isinitpath", "Session:6"], "tokens": 473}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n Interrupted = Interrupted\n Failed = Failed\n\n def __init__(self, config):\n nodes.FSCollector.__init__(\n self, config.rootdir, parent=None, config=config, session=self, nodeid=\"\"\n )\n self.testsfailed = 0\n self.testscollected = 0\n self.shouldstop = False\n self.shouldfail = False\n self.trace = config.trace.root.get(\"collection\")\n self._norecursepatterns = config.getini(\"norecursedirs\")\n self.startdir = config.invocation_dir\n self._initialpaths = frozenset()\n # Keep track of any collected nodes in here, so we don't duplicate fixtures\n self._node_cache = {}\n self._bestrelpathcache = _bestrelpath_cache(config.rootdir)\n # Dirnames of pkgs with dunder-init files.\n self._pkg_roots = {}\n\n self.config.pluginmanager.register(self, name=\"session\")\n\n def __repr__(self):\n return \"<%s %s exitstatus=%r testsfailed=%d testscollected=%d>\" % (\n self.__class__.__name__,\n self.name,\n getattr(self, \"exitstatus\", \"\"),\n self.testsfailed,\n self.testscollected,\n )\n\n def _node_location_to_relpath(self, node_path):\n # bestrelpath is a quite slow function\n return self._bestrelpathcache[node_path]\n\n @hookimpl(tryfirst=True)\n def pytest_collectstart(self):\n if self.shouldfail:\n raise self.Failed(self.shouldfail)\n if self.shouldstop:\n raise self.Interrupted(self.shouldstop)\n\n @hookimpl(tryfirst=True)\n def pytest_runtest_logreport(self, report):\n if report.failed and not hasattr(report, \"wasxfail\"):\n self.testsfailed += 1\n maxfail = self.config.getvalue(\"maxfail\")\n if maxfail and self.testsfailed >= maxfail:\n self.shouldfail = \"stopping after %d failures\" % (self.testsfailed)\n\n pytest_collectreport = pytest_runtest_logreport\n\n def isinitpath(self, path):\n return path in self._initialpaths", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session.gethookproxy_Session.perform_collect.return.items": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session.gethookproxy_Session.perform_collect.return.items", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 477, "end_line": 502, "span_ids": ["Session.gethookproxy", "Session.perform_collect"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def gethookproxy(self, fspath):\n # check if we have the common case of running\n # hooks with all conftest.py files\n pm = self.config.pluginmanager\n my_conftestmodules = pm._getconftestmodules(fspath)\n remove_mods = pm._conftest_plugins.difference(my_conftestmodules)\n if remove_mods:\n # one or more conftests are not in use at this fspath\n proxy = FSHookProxy(fspath, pm, remove_mods)\n else:\n # all plugis are active for this fspath\n proxy = self.config.hook\n return proxy\n\n def perform_collect(self, args=None, genitems=True):\n hook = self.config.hook\n try:\n items = self._perform_collect(args, genitems)\n self.config.pluginmanager.check_pending()\n hook.pytest_collection_modifyitems(\n session=self, config=self.config, items=items\n )\n finally:\n hook.pytest_collection_finish(session=self)\n self.testscollected = len(items)\n return items", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._perform_collect_Session.collect.for_initialpart_in_self__.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._perform_collect_Session.collect.for_initialpart_in_self__.None_1", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 504, "end_line": 549, "span_ids": ["Session.collect", "Session._perform_collect"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _perform_collect(self, args, genitems):\n if args is None:\n args = self.config.args\n self.trace(\"perform_collect\", self, args)\n self.trace.root.indent += 1\n self._notfound = []\n initialpaths = []\n self._initialparts = []\n self.items = items = []\n for arg in args:\n parts = self._parsearg(arg)\n self._initialparts.append(parts)\n initialpaths.append(parts[0])\n self._initialpaths = frozenset(initialpaths)\n rep = collect_one_node(self)\n self.ihook.pytest_collectreport(report=rep)\n self.trace.root.indent -= 1\n if self._notfound:\n errors = []\n for arg, exc in self._notfound:\n line = \"(no name %r in any of %r)\" % (arg, exc.args[0])\n errors.append(\"not found: %s\\n%s\" % (arg, line))\n # XXX: test this\n raise UsageError(*errors)\n if not genitems:\n return rep.result\n else:\n if 
rep.passed:\n for node in rep.result:\n self.items.extend(self.genitems(node))\n return items\n\n def collect(self):\n for initialpart in self._initialparts:\n arg = \"::\".join(map(str, initialpart))\n self.trace(\"processing argument\", arg)\n self.trace.root.indent += 1\n try:\n for x in self._collect(arg):\n yield x\n except NoMatch:\n # we are inside a make_report hook so\n # we cannot directly pass through the exception\n self._notfound.append((arg, sys.exc_info()[1]))\n\n self.trace.root.indent -= 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collect_Session._collect.if_argpath_check_dir_1_.else_.for_y_in_m_.yield_y": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collect_Session._collect.if_argpath_check_dir_1_.else_.for_y_in_m_.yield_y", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 551, "end_line": 626, "span_ids": ["Session._collect"], "tokens": 688}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _collect(self, arg):\n from _pytest.python import Package\n\n names = self._parsearg(arg)\n argpath = names.pop(0)\n\n # Start with a Session root, and delve to argpath item (dir or file)\n # and stack all Packages found on the way.\n # No point in finding packages when collecting doctests\n if not self.config.getoption(\"doctestmodules\", False):\n pm = self.config.pluginmanager\n for parent in reversed(argpath.parts()):\n if pm._confcutdir and pm._confcutdir.relto(parent):\n break\n\n if parent.isdir():\n pkginit = parent.join(\"__init__.py\")\n if pkginit.isfile():\n if pkginit not in self._node_cache:\n col = self._collectfile(pkginit, handle_dupes=False)\n if col:\n if isinstance(col[0], Package):\n self._pkg_roots[parent] = col[0]\n # always store a list in the cache, matchnodes expects it\n self._node_cache[col[0].fspath] = [col[0]]\n\n # If it's a directory argument, recurse and look for any Subpackages.\n # Let the Package collector deal with subnodes, don't collect here.\n if argpath.check(dir=1):\n assert not names, \"invalid arg %r\" % (arg,)\n\n seen_dirs = set()\n for path in argpath.visit(\n fil=self._visit_filter, rec=self._recurse, bf=True, sort=True\n ):\n dirpath = path.dirpath()\n if dirpath not in seen_dirs:\n # Collect packages first.\n seen_dirs.add(dirpath)\n pkginit = dirpath.join(\"__init__.py\")\n if pkginit.exists():\n for x in self._collectfile(pkginit):\n yield x\n if isinstance(x, Package):\n self._pkg_roots[dirpath] = x\n if dirpath in self._pkg_roots:\n # Do not collect packages here.\n continue\n\n for x in self._collectfile(path):\n key = (type(x), x.fspath)\n if key in self._node_cache:\n yield self._node_cache[key]\n else:\n self._node_cache[key] = x\n yield x\n else:\n assert argpath.check(file=1)\n\n if argpath in self._node_cache:\n col = self._node_cache[argpath]\n else:\n collect_root = self._pkg_roots.get(argpath.dirname, self)\n col = 
collect_root._collectfile(argpath, handle_dupes=False)\n if col:\n self._node_cache[argpath] = col\n m = self.matchnodes(col, names)\n # If __init__.py was the only file requested, then the matched node will be\n # the corresponding Package, and the first yielded item will be the __init__\n # Module itself, so just use that. If this special case isn't taken, then all\n # the files in the package will be yielded.\n if argpath.basename == \"__init__.py\":\n yield next(m[0].collect())\n return\n for y in m:\n yield y", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collectfile_Session._collectfile.return.ihook_pytest_collect_file": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._collectfile_Session._collectfile.return.ihook_pytest_collect_file", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 628, "end_line": 649, "span_ids": ["Session._collectfile"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _collectfile(self, path, handle_dupes=True):\n assert path.isfile(), \"%r is not a file (isdir=%r, exists=%r, islink=%r)\" % (\n path,\n path.isdir(),\n path.exists(),\n path.islink(),\n )\n ihook = self.gethookproxy(path)\n if not self.isinitpath(path):\n if ihook.pytest_ignore_collect(path=path, config=self.config):\n return ()\n\n if handle_dupes:\n keepduplicates = self.config.getoption(\"keepduplicates\")\n if not keepduplicates:\n duplicate_paths = self.config.pluginmanager._duplicatepaths\n if path in duplicate_paths:\n return ()\n else:\n duplicate_paths.add(path)\n\n return ihook.pytest_collect_file(path=path, parent=self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._recurse_Session.if_six_PY2_.else_._visit_filter.return.f_check_file_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._recurse_Session.if_six_PY2_.else_._visit_filter.return.f_check_file_1_", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 651, "end_line": 674, "span_ids": ["Session._recurse", "Session:8"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _recurse(self, dirpath):\n if dirpath.basename == \"__pycache__\":\n return False\n ihook = self.gethookproxy(dirpath.dirpath())\n if ihook.pytest_ignore_collect(path=dirpath, 
config=self.config):\n return False\n for pat in self._norecursepatterns:\n if dirpath.check(fnmatch=pat):\n return False\n ihook = self.gethookproxy(dirpath)\n ihook.pytest_collect_directory(path=dirpath, parent=self)\n return True\n\n if six.PY2:\n\n @staticmethod\n def _visit_filter(f):\n return f.check(file=1) and not f.strpath.endswith(\"*.pyc\")\n\n else:\n\n @staticmethod\n def _visit_filter(f):\n return f.check(file=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._tryconvertpyarg_Session._tryconvertpyarg.return.path": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._tryconvertpyarg_Session._tryconvertpyarg.return.path", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 676, "end_line": 695, "span_ids": ["Session._tryconvertpyarg"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _tryconvertpyarg(self, x):\n \"\"\"Convert a dotted module name to path.\"\"\"\n try:\n with _patched_find_module():\n loader = pkgutil.find_loader(x)\n except ImportError:\n return x\n if loader is None:\n return x\n # This method is sometimes invoked when AssertionRewritingHook, which\n # does not define a get_filename method, is already in place:\n try:\n with _patched_find_module():\n path = loader.get_filename(x)\n except AttributeError:\n # Retrieve path from AssertionRewritingHook:\n path = loader.modules[x][0].co_filename\n if loader.is_package(x):\n path = os.path.dirname(path)\n return path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._parsearg_Session.matchnodes.return.nodes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._parsearg_Session.matchnodes.return.nodes", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 697, "end_line": 722, "span_ids": ["Session._parsearg", "Session.matchnodes"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _parsearg(self, arg):\n \"\"\" return (fspath, names) tuple after checking the file exists. 
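_recurse above prunes directories whose basename matches one of the norecursedirs patterns (and __pycache__ unconditionally). Projects typically extend that list in their ini file; the values below are examples only:

[pytest]
norecursedirs = .git build dist *.egg-info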
\"\"\"\n parts = str(arg).split(\"::\")\n if self.config.option.pyargs:\n parts[0] = self._tryconvertpyarg(parts[0])\n relpath = parts[0].replace(\"/\", os.sep)\n path = self.config.invocation_dir.join(relpath, abs=True)\n if not path.check():\n if self.config.option.pyargs:\n raise UsageError(\n \"file or package not found: \" + arg + \" (missing __init__.py?)\"\n )\n raise UsageError(\"file not found: \" + arg)\n parts[0] = path.realpath()\n return parts\n\n def matchnodes(self, matching, names):\n self.trace(\"matchnodes\", matching, names)\n self.trace.root.indent += 1\n nodes = self._matchnodes(matching, names)\n num = len(nodes)\n self.trace(\"matchnodes finished -> \", num, \"nodes\")\n self.trace.root.indent -= 1\n if num == 0:\n raise NoMatch(matching, names[:1])\n return nodes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._matchnodes_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_Session._matchnodes_", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 724, "end_line": 774, "span_ids": ["Session._matchnodes", "Session.genitems"], "tokens": 430}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Session(nodes.FSCollector):\n\n def _matchnodes(self, matching, names):\n if not matching or not names:\n return matching\n name = names[0]\n assert name\n nextnames = names[1:]\n resultnodes = []\n for node in matching:\n if isinstance(node, nodes.Item):\n if not names:\n resultnodes.append(node)\n continue\n assert isinstance(node, nodes.Collector)\n key = (type(node), node.nodeid)\n if key in self._node_cache:\n rep = self._node_cache[key]\n else:\n rep = collect_one_node(node)\n self._node_cache[key] = rep\n if rep.passed:\n has_matched = False\n for x in rep.result:\n # TODO: remove parametrized workaround once collection structure contains parametrization\n if x.name == name or x.name.split(\"[\")[0] == name:\n resultnodes.extend(self.matchnodes([x], nextnames))\n has_matched = True\n # XXX accept IDs that don't have \"()\" for class instances\n if not has_matched and len(rep.result) == 1 and x.name == \"()\":\n nextnames.insert(0, name)\n resultnodes.extend(self.matchnodes([x], nextnames))\n else:\n # report collection failures here to avoid failing to run some test\n # specified in the command line because the module could not be\n # imported (#134)\n node.ihook.pytest_collectreport(report=rep)\n return resultnodes\n\n def genitems(self, node):\n self.trace(\"genitems\", node)\n if isinstance(node, nodes.Item):\n node.ihook.pytest_itemcollected(item=node)\n yield node\n else:\n assert isinstance(node, nodes.Collector)\n rep = collect_one_node(node)\n if rep.passed:\n for subnode in rep.result:\n for x in self.genitems(subnode):\n yield x\n node.ihook.pytest_collectreport(report=rep)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py__generic_mechanism_for___all__._Mark_MarkDecorator_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py__generic_mechanism_for___all__._Mark_MarkDecorator_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 17, "span_ids": ["imports", "docstring", "impl"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" generic mechanism for marking and selecting python functions. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom .legacy import matchkeyword\nfrom .legacy import matchmark\nfrom .structures import EMPTY_PARAMETERSET_OPTION\nfrom .structures import get_empty_parameterset_mark\nfrom .structures import Mark\nfrom .structures import MARK_GEN\nfrom .structures import MarkDecorator\nfrom .structures import MarkGenerator\nfrom .structures import ParameterSet\nfrom _pytest.config import UsageError\n\n__all__ = [\"Mark\", \"MarkDecorator\", \"MarkGenerator\", \"get_empty_parameterset_mark\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_param_param.return.ParameterSet_param_value": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_param_param.return.ParameterSet_param_value", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 20, "end_line": 37, "span_ids": ["param"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def param(*values, **kw):\n \"\"\"Specify a parameter in `pytest.mark.parametrize`_ calls or\n :ref:`parametrized fixtures `.\n\n .. 
code-block:: python\n\n @pytest.mark.parametrize(\"test_input,expected\", [\n (\"3+5\", 8),\n pytest.param(\"6*9\", 42, marks=pytest.mark.xfail),\n ])\n def test_eval(test_input, expected):\n assert eval(test_input) == expected\n\n :param values: variable args of the values of the parameter set, in order.\n :keyword marks: a single mark or a list of marks to be applied to this parameter set.\n :keyword str id: the id to attribute to this parameter set.\n \"\"\"\n return ParameterSet.param(*values, **kw)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_addoption_pytest_addoption.parser_addini_EMPTY_PARAM": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_addoption_pytest_addoption.parser_addini_EMPTY_PARAM", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 40, "end_line": 78, "span_ids": ["pytest_addoption"], "tokens": 341}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group._addoption(\n \"-k\",\n action=\"store\",\n dest=\"keyword\",\n default=\"\",\n metavar=\"EXPRESSION\",\n help=\"only run tests which match the given substring expression. \"\n \"An expression is a python evaluatable expression \"\n \"where all names are substring-matched against test names \"\n \"and their parent classes. Example: -k 'test_method or test_\"\n \"other' matches all test functions and classes whose name \"\n \"contains 'test_method' or 'test_other', while -k 'not test_method' \"\n \"matches those that don't contain 'test_method' in their names. \"\n \"-k 'not test_method and not test_other' will eliminate the matches. \"\n \"Additionally keywords are matched to classes and functions \"\n \"containing extra names in their 'extra_keyword_matches' set, \"\n \"as well as functions which have names assigned directly to them.\",\n )\n\n group._addoption(\n \"-m\",\n action=\"store\",\n dest=\"markexpr\",\n default=\"\",\n metavar=\"MARKEXPR\",\n help=\"only run tests matching given mark expression. 
\"\n \"example: -m 'mark1 and not mark2'.\",\n )\n\n group.addoption(\n \"--markers\",\n action=\"store_true\",\n help=\"show markers (builtin, plugin and per-project ones).\",\n )\n\n parser.addini(\"markers\", \"markers for test functions\", \"linelist\")\n parser.addini(EMPTY_PARAMETERSET_OPTION, \"default marker for empty parametersets\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_cmdline_main_pytest_cmdline_main.tryfirst.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_pytest_cmdline_main_pytest_cmdline_main.tryfirst.True", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 81, "end_line": 98, "span_ids": ["pytest_cmdline_main", "impl:3"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_cmdline_main(config):\n import _pytest.config\n\n if config.option.markers:\n config._do_configure()\n tw = _pytest.config.create_terminal_writer(config)\n for line in config.getini(\"markers\"):\n parts = line.split(\":\", 1)\n name = parts[0]\n rest = parts[1] if len(parts) == 2 else \"\"\n tw.write(\"@pytest.mark.%s:\" % name, bold=True)\n tw.line(rest)\n tw.line()\n config._ensure_unconfigure()\n return 0\n\n\npytest_cmdline_main.tryfirst = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_keyword_deselect_by_keyword.if_deselected_.items_remaining": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_keyword_deselect_by_keyword.if_deselected_.items_remaining", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 101, "end_line": 125, "span_ids": ["deselect_by_keyword"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deselect_by_keyword(items, config):\n keywordexpr = config.option.keyword.lstrip()\n if not keywordexpr:\n return\n\n if keywordexpr.startswith(\"-\"):\n keywordexpr = \"not \" + keywordexpr[1:]\n selectuntil = False\n if keywordexpr[-1:] == \":\":\n selectuntil = True\n keywordexpr = keywordexpr[:-1]\n\n remaining = []\n deselected = []\n for colitem in items:\n if keywordexpr and not matchkeyword(colitem, keywordexpr):\n deselected.append(colitem)\n else:\n if selectuntil:\n keywordexpr = None\n remaining.append(colitem)\n\n if deselected:\n config.hook.pytest_deselected(items=deselected)\n items[:] = 
remaining", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_mark_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/__init__.py_deselect_by_mark_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "implementation", "start_line": 128, "end_line": 166, "span_ids": ["pytest_unconfigure", "deselect_by_mark", "pytest_configure", "pytest_collection_modifyitems"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deselect_by_mark(items, config):\n matchexpr = config.option.markexpr\n if not matchexpr:\n return\n\n remaining = []\n deselected = []\n for item in items:\n if matchmark(item, matchexpr):\n remaining.append(item)\n else:\n deselected.append(item)\n\n if deselected:\n config.hook.pytest_deselected(items=deselected)\n items[:] = remaining\n\n\ndef pytest_collection_modifyitems(items, config):\n deselect_by_keyword(items, config)\n deselect_by_mark(items, config)\n\n\ndef pytest_configure(config):\n config._old_mark_config = MARK_GEN._config\n MARK_GEN._config = config\n\n empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)\n\n if empty_parameterset not in (\"skip\", \"xfail\", \"fail_at_collect\", None, \"\"):\n raise UsageError(\n \"{!s} must be one of skip, xfail or fail_at_collect\"\n \" but it is {!r}\".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)\n )\n\n\ndef pytest_unconfigure(config):\n MARK_GEN._config = getattr(config, \"_old_mark_config\", None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_os_MarkEvaluator.invalidraise.return.not_isinstance_exc_raise": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_os_MarkEvaluator.invalidraise.return.not_isinstance_exc_raise", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/evaluate.py", "file_name": "evaluate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 48, "span_ids": ["MarkEvaluator.invalidraise", "cached_eval", "MarkEvaluator:2", "MarkEvaluator._get_marks", "MarkEvaluator.wasvalid", "imports", "MarkEvaluator", "MarkEvaluator.__bool__"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import os\nimport platform\nimport sys\nimport traceback\n\nimport six\n\nfrom ..outcomes import fail\nfrom ..outcomes import TEST_OUTCOME\n\n\ndef cached_eval(config, expr, d):\n if not hasattr(config, \"_evalcache\"):\n config._evalcache = {}\n try:\n return config._evalcache[expr]\n except KeyError:\n 
import _pytest._code\n\n exprcode = _pytest._code.compile(expr, mode=\"eval\")\n config._evalcache[expr] = x = eval(exprcode, d)\n return x\n\n\nclass MarkEvaluator(object):\n def __init__(self, item, name):\n self.item = item\n self._marks = None\n self._mark = None\n self._mark_name = name\n\n def __bool__(self):\n # dont cache here to prevent staleness\n return bool(self._get_marks())\n\n __nonzero__ = __bool__\n\n def wasvalid(self):\n return not hasattr(self, \"exc\")\n\n def _get_marks(self):\n return list(self.item.iter_markers(name=self._mark_name))\n\n def invalidraise(self, exc):\n raises = self.get(\"raises\")\n if not raises:\n return\n return not isinstance(exc, raises)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator.istrue_MarkEvaluator._getglobals.return.d": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator.istrue_MarkEvaluator._getglobals.return.d", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/evaluate.py", "file_name": "evaluate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 71, "span_ids": ["MarkEvaluator._getglobals", "MarkEvaluator.istrue"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MarkEvaluator(object):\n\n def istrue(self):\n try:\n return self._istrue()\n except TEST_OUTCOME:\n self.exc = sys.exc_info()\n if isinstance(self.exc[1], SyntaxError):\n msg = [\" \" * (self.exc[1].offset + 4) + \"^\"]\n msg.append(\"SyntaxError: invalid syntax\")\n else:\n msg = traceback.format_exception_only(*self.exc[:2])\n fail(\n \"Error evaluating %r expression\\n\"\n \" %s\\n\"\n \"%s\" % (self._mark_name, self.expr, \"\\n\".join(msg)),\n pytrace=False,\n )\n\n def _getglobals(self):\n d = {\"os\": os, \"sys\": sys, \"platform\": platform, \"config\": self.item.config}\n if hasattr(self.item, \"obj\"):\n d.update(self.item.obj.__globals__)\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator._istrue_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/evaluate.py_MarkEvaluator._istrue_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/evaluate.py", "file_name": "evaluate.py", "file_type": "text/x-python", "category": "implementation", "start_line": 73, "end_line": 126, "span_ids": ["MarkEvaluator._istrue", "MarkEvaluator.getexplanation", "MarkEvaluator.get"], "tokens": 348}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MarkEvaluator(object):\n\n def _istrue(self):\n if hasattr(self, 
\"result\"):\n return self.result\n self._marks = self._get_marks()\n\n if self._marks:\n self.result = False\n for mark in self._marks:\n self._mark = mark\n if \"condition\" in mark.kwargs:\n args = (mark.kwargs[\"condition\"],)\n else:\n args = mark.args\n\n for expr in args:\n self.expr = expr\n if isinstance(expr, six.string_types):\n d = self._getglobals()\n result = cached_eval(self.item.config, expr, d)\n else:\n if \"reason\" not in mark.kwargs:\n # XXX better be checked at collection time\n msg = (\n \"you need to specify reason=STRING \"\n \"when using booleans as conditions.\"\n )\n fail(msg)\n result = bool(expr)\n if result:\n self.result = True\n self.reason = mark.kwargs.get(\"reason\", None)\n self.expr = expr\n return self.result\n\n if not args:\n self.result = True\n self.reason = mark.kwargs.get(\"reason\", None)\n return self.result\n return False\n\n def get(self, attr, default=None):\n if self._mark is None:\n return default\n return self._mark.kwargs.get(attr, default)\n\n def getexplanation(self):\n expl = getattr(self, \"reason\", None) or self.get(\"reason\", None)\n if not expl:\n if not hasattr(self, \"expr\"):\n return \"\"\n else:\n return \"condition: \" + str(self.expr)\n return expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py___MarkMapping.__getitem__.return.name_in_self_own_mark_nam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py___MarkMapping.__getitem__.return.name_in_self_own_mark_nam", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/legacy.py", "file_name": "legacy.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["MarkMapping.from_item", "MarkMapping", "docstring", "imports", "MarkMapping.__getitem__"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nthis is a place where we put datastructures used by legacy apis\nwe hope ot remove\n\"\"\"\nimport keyword\n\nimport attr\n\nfrom _pytest.config import UsageError\n\n\n@attr.s\nclass MarkMapping(object):\n \"\"\"Provides a local mapping for markers where item access\n resolves to True if the marker is present. 
\"\"\"\n\n own_mark_names = attr.ib()\n\n @classmethod\n def from_item(cls, item):\n mark_names = {mark.name for mark in item.iter_markers()}\n return cls(mark_names)\n\n def __getitem__(self, name):\n return name in self.own_mark_names", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_KeywordMapping_matchmark.try_.except_SyntaxError_as_e_.raise_SyntaxError_str_e_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_KeywordMapping_matchmark.try_.except_SyntaxError_as_e_.raise_SyntaxError_str_e_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/legacy.py", "file_name": "legacy.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 74, "span_ids": ["impl", "KeywordMapping.__getitem__", "matchmark", "KeywordMapping.from_item", "KeywordMapping"], "tokens": 318}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class KeywordMapping(object):\n \"\"\"Provides a local mapping for keywords.\n Given a list of names, map any substring of one of these names to True.\n \"\"\"\n\n def __init__(self, names):\n self._names = names\n\n @classmethod\n def from_item(cls, item):\n mapped_names = set()\n\n # Add the names of the current item and any parent items\n import pytest\n\n for item in item.listchain():\n if not isinstance(item, pytest.Instance):\n mapped_names.add(item.name)\n\n # Add the names added as extra keywords to current or parent items\n mapped_names.update(item.listextrakeywords())\n\n # Add the names attached to the current function through direct assignment\n if hasattr(item, \"function\"):\n mapped_names.update(item.function.__dict__)\n\n # add the markers to the keywords as we no longer handle them correctly\n mapped_names.update(mark.name for mark in item.iter_markers())\n\n return cls(mapped_names)\n\n def __getitem__(self, subname):\n for name in self._names:\n if subname in name:\n return True\n return False\n\n\npython_keywords_allowed_list = [\"or\", \"and\", \"not\"]\n\n\ndef matchmark(colitem, markexpr):\n \"\"\"Tries to match on any marker names, attached to the given colitem.\"\"\"\n try:\n return eval(markexpr, {}, MarkMapping.from_item(colitem))\n except SyntaxError as e:\n raise SyntaxError(str(e) + \"\\nMarker expression must be valid Python!\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_matchkeyword_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/legacy.py_matchkeyword_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/legacy.py", "file_name": "legacy.py", "file_type": "text/x-python", "category": "implementation", "start_line": 77, "end_line": 103, "span_ids": ["matchkeyword"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def matchkeyword(colitem, keywordexpr):\n \"\"\"Tries to match given keyword expression to given collector item.\n\n Will match on the name of colitem, including the names of its parents.\n Only matches names of items which are either a :class:`Class` or a\n :class:`Function`.\n Additionally, matches on names in the 'extra_keyword_matches' set of\n any item, as well as names directly assigned to test functions.\n \"\"\"\n mapping = KeywordMapping.from_item(colitem)\n if \" \" not in keywordexpr:\n # special case to allow for simple \"-k pass\" and \"-k 1.3\"\n return mapping[keywordexpr]\n elif keywordexpr.startswith(\"not \") and \" \" not in keywordexpr[4:]:\n return not mapping[keywordexpr[4:]]\n for kwd in keywordexpr.split():\n if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list:\n raise UsageError(\n \"Python keyword '{}' not accepted in expressions passed to '-k'\".format(\n kwd\n )\n )\n try:\n return eval(keywordexpr, {}, mapping)\n except SyntaxError:\n raise UsageError(\"Wrong expression passed to '-k': {}\".format(keywordexpr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_inspect_istestfunc.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_inspect_istestfunc.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["alias", "imports", "istestfunc", "impl"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import inspect\nimport warnings\nfrom collections import namedtuple\nfrom operator import attrgetter\n\nimport attr\nimport six\n\nfrom ..compat import ascii_escaped\nfrom ..compat import getfslineno\nfrom ..compat import MappingMixin\nfrom ..compat import NOTSET\nfrom _pytest.deprecated import PYTEST_PARAM_UNKNOWN_KWARGS\nfrom _pytest.outcomes import fail\nfrom _pytest.warning_types import PytestUnknownMarkWarning\n\nEMPTY_PARAMETERSET_OPTION = \"empty_parameter_set_mark\"\n\n\ndef alias(name, warning=None):\n getter = attrgetter(name)\n\n def warned(self):\n warnings.warn(warning, stacklevel=2)\n return getter(self)\n\n return property(getter if warning is None else warned, doc=\"alias for \" + name)\n\n\ndef istestfunc(func):\n return (\n hasattr(func, \"__call__\")\n and getattr(func, \"__name__\", \"\") != \"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_empty_parameterset_mark_get_empty_parameterset_mark.return.mark_reason_reason_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_empty_parameterset_mark_get_empty_parameterset_mark.return.mark_reason_reason_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 37, "end_line": 60, "span_ids": ["get_empty_parameterset_mark"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_empty_parameterset_mark(config, argnames, func):\n from ..nodes import Collector\n\n requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)\n if requested_mark in (\"\", None, \"skip\"):\n mark = MARK_GEN.skip\n elif requested_mark == \"xfail\":\n mark = MARK_GEN.xfail(run=False)\n elif requested_mark == \"fail_at_collect\":\n f_name = func.__name__\n _, lineno = getfslineno(func)\n raise Collector.CollectError(\n \"Empty parameter set in '%s' at line %d\" % (f_name, lineno + 1)\n )\n else:\n raise LookupError(requested_mark)\n fs, lineno = getfslineno(func)\n reason = \"got empty parameter set %r, function %s at %s:%d\" % (\n argnames,\n func.__name__,\n fs,\n lineno,\n )\n return mark(reason=reason)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet_ParameterSet.param.return.cls_values_marks_id__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet_ParameterSet.param.return.cls_values_marks_id__", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 84, "span_ids": ["ParameterSet.param", "ParameterSet"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ParameterSet(namedtuple(\"ParameterSet\", \"values, marks, id\")):\n @classmethod\n def param(cls, *values, **kwargs):\n marks = kwargs.pop(\"marks\", ())\n if isinstance(marks, MarkDecorator):\n marks = (marks,)\n else:\n assert isinstance(marks, (tuple, list, set))\n\n id_ = kwargs.pop(\"id\", None)\n if id_ is not None:\n if not isinstance(id_, six.string_types):\n raise TypeError(\n \"Expected id to be a string, got {}: {!r}\".format(type(id_), id_)\n )\n id_ = ascii_escaped(id_)\n\n if kwargs:\n warnings.warn(\n PYTEST_PARAM_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=3\n )\n return cls(values, marks, id_)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet.extract_from_ParameterSet.extract_from.if_force_tuple_.else_.return.cls_parameterset_marks_": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet.extract_from_ParameterSet.extract_from.if_force_tuple_.else_.return.cls_parameterset_marks_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 103, "span_ids": ["ParameterSet.extract_from"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ParameterSet(namedtuple(\"ParameterSet\", \"values, marks, id\")):\n\n @classmethod\n def extract_from(cls, parameterset, force_tuple=False):\n \"\"\"\n :param parameterset:\n a legacy style parameterset that may or may not be a tuple,\n and may or may not be wrapped into a mess of mark objects\n\n :param force_tuple:\n enforce tuple wrapping so single argument tuple values\n don't get decomposed and break tests\n \"\"\"\n\n if isinstance(parameterset, cls):\n return parameterset\n if force_tuple:\n return cls.param(parameterset)\n else:\n return cls(parameterset, marks=[], id=None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet._for_parametrize_ParameterSet._for_parametrize.return.argnames_parameters": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_ParameterSet._for_parametrize_ParameterSet._for_parametrize.return.argnames_parameters", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 105, "end_line": 144, "span_ids": ["ParameterSet._for_parametrize"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ParameterSet(namedtuple(\"ParameterSet\", \"values, marks, id\")):\n\n @classmethod\n def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):\n if not isinstance(argnames, (tuple, list)):\n argnames = [x.strip() for x in argnames.split(\",\") if x.strip()]\n force_tuple = len(argnames) == 1\n else:\n force_tuple = False\n parameters = [\n ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues\n ]\n del argvalues\n\n if parameters:\n # check all parameter sets have the correct number of values\n for param in parameters:\n if len(param.values) != len(argnames):\n msg = (\n '{nodeid}: in \"parametrize\" the number of names ({names_len}):\\n'\n \" {names}\\n\"\n \"must be equal to the number of values ({values_len}):\\n\"\n \" {values}\"\n )\n fail(\n msg.format(\n nodeid=function_definition.nodeid,\n values=param.values,\n names=argnames,\n names_len=len(argnames),\n values_len=len(param.values),\n ),\n pytrace=False,\n )\n else:\n # empty parameter set (likely computed at 
runtime): create a single\n # parameter set with NOTSET values, with the \"empty parameter set\" mark applied to it\n mark = get_empty_parameterset_mark(config, argnames, func)\n parameters.append(\n ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)\n )\n return argnames, parameters", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_Mark_Mark.combined_with.return.Mark_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_Mark_Mark.combined_with.return.Mark_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 147, "end_line": 167, "span_ids": ["Mark", "Mark.combined_with"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(frozen=True)\nclass Mark(object):\n #: name of the mark\n name = attr.ib(type=str)\n #: positional arguments of the mark decorator\n args = attr.ib() # List[object]\n #: keyword arguments of the mark decorator\n kwargs = attr.ib() # Dict[str, object]\n\n def combined_with(self, other):\n \"\"\"\n :param other: the mark to combine with\n :type other: Mark\n :rtype: Mark\n\n combines by appending args and merging the mappings\n \"\"\"\n assert self.name == other.name\n return Mark(\n self.name, self.args + other.args, dict(self.kwargs, **other.kwargs)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkDecorator_MarkDecorator.__call__.return.self_with_args_args_k": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkDecorator_MarkDecorator.__call__.return.self_with_args_args_k", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 170, "end_line": 241, "span_ids": ["MarkDecorator.__repr__", "MarkDecorator.markname", "MarkDecorator", "MarkDecorator.__eq__", "MarkDecorator.with_args", "MarkDecorator.__call__"], "tokens": 615}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass MarkDecorator(object):\n \"\"\" A decorator for test functions and test classes. 
When applied\n it will create :class:`MarkInfo` objects which may be\n :ref:`retrieved by hooks as item keywords `.\n MarkDecorator instances are often created like this::\n\n mark1 = pytest.mark.NAME # simple MarkDecorator\n mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator\n\n and can then be applied as decorators to test functions::\n\n @mark2\n def test_function():\n pass\n\n When a MarkDecorator instance is called it does the following:\n 1. If called with a single class as its only positional argument and no\n additional keyword arguments, it attaches itself to the class so it\n gets applied automatically to all test cases found in that class.\n 2. If called with a single function as its only positional argument and\n no additional keyword arguments, it attaches a MarkInfo object to the\n function, containing all the arguments already stored internally in\n the MarkDecorator.\n 3. When called in any other case, it performs a 'fake construction' call,\n i.e. it returns a new MarkDecorator instance with the original\n MarkDecorator's content updated with the arguments passed to this\n call.\n\n Note: The rules above prevent MarkDecorator objects from storing only a\n single function or class reference as their positional argument with no\n additional keyword or positional arguments.\n\n \"\"\"\n\n mark = attr.ib(validator=attr.validators.instance_of(Mark))\n\n name = alias(\"mark.name\")\n args = alias(\"mark.args\")\n kwargs = alias(\"mark.kwargs\")\n\n @property\n def markname(self):\n return self.name # for backward-compat (2.4.1 had this attr)\n\n def __eq__(self, other):\n return self.mark == other.mark if isinstance(other, MarkDecorator) else False\n\n def __repr__(self):\n return \"\" % (self.mark,)\n\n def with_args(self, *args, **kwargs):\n \"\"\" return a MarkDecorator with extra arguments added\n\n unlike call this can be used even if the sole argument is a callable/class\n\n :return: MarkDecorator\n \"\"\"\n\n mark = Mark(self.name, args, kwargs)\n return self.__class__(self.mark.combined_with(mark))\n\n def __call__(self, *args, **kwargs):\n \"\"\" if passed a single callable argument: decorate it with mark info.\n otherwise add *args/**kwargs in-place to mark information. 
\"\"\"\n if args and not kwargs:\n func = args[0]\n is_class = inspect.isclass(func)\n if len(args) == 1 and (istestfunc(func) or is_class):\n store_mark(func, self.mark)\n return func\n return self.with_args(*args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_unpacked_marks_store_mark.obj.pytestmark.get_unpacked_marks_obj_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_get_unpacked_marks_store_mark.obj.pytestmark.get_unpacked_marks_obj_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 244, "end_line": 277, "span_ids": ["store_mark", "get_unpacked_marks", "normalize_mark_list"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_unpacked_marks(obj):\n \"\"\"\n obtain the unpacked marks that are stored on an object\n \"\"\"\n mark_list = getattr(obj, \"pytestmark\", [])\n if not isinstance(mark_list, list):\n mark_list = [mark_list]\n return normalize_mark_list(mark_list)\n\n\ndef normalize_mark_list(mark_list):\n \"\"\"\n normalizes marker decorating helpers to mark objects\n\n :type mark_list: List[Union[Mark, Markdecorator]]\n :rtype: List[Mark]\n \"\"\"\n extracted = [\n getattr(mark, \"mark\", mark) for mark in mark_list\n ] # unpack MarkDecorator\n for mark in extracted:\n if not isinstance(mark, Mark):\n raise TypeError(\"got {!r} instead of Mark\".format(mark))\n return [x for x in extracted if isinstance(x, Mark)]\n\n\ndef store_mark(obj, mark):\n \"\"\"store a Mark on an object\n this is used to implement the Mark declarations/decorators correctly\n \"\"\"\n assert isinstance(mark, Mark), mark\n # always reassign name to avoid updating pytestmark\n # in a reference that was only borrowed\n obj.pytestmark = get_unpacked_marks(obj) + [mark]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkGenerator_MarkGenerator.__getattr__.return.MarkDecorator_Mark_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MarkGenerator_MarkGenerator.__getattr__.return.MarkDecorator_Mark_name_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 324, "span_ids": ["MarkGenerator.__getattr__", "MarkGenerator"], "tokens": 419}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MarkGenerator(object):\n \"\"\" Factory for 
:class:`MarkDecorator` objects - exposed as\n a ``pytest.mark`` singleton instance. Example::\n\n import pytest\n @pytest.mark.slowtest\n def test_function():\n pass\n\n will set a 'slowtest' :class:`MarkInfo` object\n on the ``test_function`` object. \"\"\"\n\n _config = None\n _markers = set()\n\n def __getattr__(self, name):\n if name[0] == \"_\":\n raise AttributeError(\"Marker name must NOT start with underscore\")\n\n if self._config is not None:\n # We store a set of markers as a performance optimisation - if a mark\n # name is in the set we definitely know it, but a mark may be known and\n # not in the set. We therefore start by updating the set!\n if name not in self._markers:\n for line in self._config.getini(\"markers\"):\n # example lines: \"skipif(condition): skip the given test if...\"\n # or \"hypothesis: tests which use Hypothesis\", so to get the\n # marker name we split on both `:` and `(`.\n marker = line.split(\":\")[0].split(\"(\")[0].strip()\n self._markers.add(marker)\n\n # If the name is not in the set of known marks after updating,\n # then it really is time to issue a warning or an error.\n if name not in self._markers:\n if self._config.option.strict:\n fail(\"{!r} is not a registered marker\".format(name), pytrace=False)\n else:\n warnings.warn(\n \"Unknown pytest.mark.%s - is this a typo? You can register \"\n \"custom marks to avoid this warning - for details, see \"\n \"https://docs.pytest.org/en/latest/mark.html\" % name,\n PytestUnknownMarkWarning,\n )\n\n return MarkDecorator(Mark(name, (), {}))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MARK_GEN_NodeKeywords.__repr__.return._NodeKeywords_for_node_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_MARK_GEN_NodeKeywords.__repr__.return._NodeKeywords_for_node_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 330, "end_line": 367, "span_ids": ["NodeKeywords._seen", "NodeKeywords.__len__", "NodeKeywords.__delitem__", "NodeKeywords.__setitem__", "NodeKeywords.__getitem__", "NodeKeywords.__repr__", "impl:3", "NodeKeywords.__iter__", "NodeKeywords"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "MARK_GEN = MarkGenerator()\n\n\nclass NodeKeywords(MappingMixin):\n def __init__(self, node):\n self.node = node\n self.parent = node.parent\n self._markers = {node.name: True}\n\n def __getitem__(self, key):\n try:\n return self._markers[key]\n except KeyError:\n if self.parent is None:\n raise\n return self.parent.keywords[key]\n\n def __setitem__(self, key, value):\n self._markers[key] = value\n\n def __delitem__(self, key):\n raise ValueError(\"cannot delete key in keywords dict\")\n\n def __iter__(self):\n seen = self._seen()\n return iter(seen)\n\n def _seen(self):\n seen = set(self._markers)\n if self.parent is not None:\n seen.update(self.parent.keywords)\n return seen\n\n def __len__(self):\n return 
len(self._seen())\n\n def __repr__(self):\n return \"\" % (self.node,)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_NodeMarkers_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/mark/structures.py_NodeMarkers_", "embedding": null, "metadata": {"file_path": "src/_pytest/mark/structures.py", "file_name": "structures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 370, "end_line": 399, "span_ids": ["NodeMarkers.update", "NodeMarkers.__iter__", "NodeMarkers", "NodeMarkers.find"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(cmp=False, hash=False)\nclass NodeMarkers(object):\n \"\"\"\n internal structure for storing marks belonging to a node\n\n ..warning::\n\n unstable api\n\n \"\"\"\n\n own_markers = attr.ib(default=attr.Factory(list))\n\n def update(self, add_markers):\n \"\"\"update the own markers\n \"\"\"\n self.own_markers.extend(add_markers)\n\n def find(self, name):\n \"\"\"\n find markers in own nodes or parent nodes\n needs a better place\n \"\"\"\n for mark in self.own_markers:\n if mark.name == name:\n yield mark\n\n def __iter__(self):\n return iter(self.own_markers)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py__monkeypatching_and_mo_monkeypatch.mpatch_undo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py__monkeypatching_and_mo_monkeypatch.mpatch_undo_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "docstring", "monkeypatch", "impl"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" monkeypatching and mocking functionality. 
\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport warnings\nfrom contextlib import contextmanager\n\nimport six\n\nimport pytest\nfrom _pytest.fixtures import fixture\nfrom _pytest.pathlib import Path\n\nRE_IMPORT_ERROR_NAME = re.compile(r\"^No module named (.*)$\")\n\n\n@fixture\ndef monkeypatch():\n \"\"\"The returned ``monkeypatch`` fixture provides these\n helper methods to modify objects, dictionaries or os.environ::\n\n monkeypatch.setattr(obj, name, value, raising=True)\n monkeypatch.delattr(obj, name, raising=True)\n monkeypatch.setitem(mapping, name, value)\n monkeypatch.delitem(obj, name, raising=True)\n monkeypatch.setenv(name, value, prepend=False)\n monkeypatch.delenv(name, raising=True)\n monkeypatch.syspath_prepend(path)\n monkeypatch.chdir(path)\n\n All modifications will be undone after the requesting\n test function or fixture has finished. The ``raising``\n parameter determines if a KeyError or AttributeError\n will be raised if the set/deletion operation has no target.\n \"\"\"\n mpatch = MonkeyPatch()\n yield mpatch\n mpatch.undo()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_resolve_resolve.return.found": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_resolve_resolve.return.found", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 71, "span_ids": ["resolve"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def resolve(name):\n # simplified from zope.dottedname\n parts = name.split(\".\")\n\n used = parts.pop(0)\n found = __import__(used)\n for part in parts:\n used += \".\" + part\n try:\n found = getattr(found, part)\n except AttributeError:\n pass\n else:\n continue\n # we use explicit un-nesting of the handling block in order\n # to avoid nested exceptions on python 3\n try:\n __import__(used)\n except ImportError as ex:\n # str is used for py2 vs py3\n expected = str(ex).split()[-1]\n if expected == used:\n raise\n else:\n raise ImportError(\"import error in %s: %s\" % (used, ex))\n found = annotated_getattr(found, part, used)\n return found", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_annotated_getattr_notset.Notset_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_annotated_getattr_notset.Notset_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 74, "end_line": 99, "span_ids": ["derive_importpath", "Notset", "impl:3", "annotated_getattr", "Notset.__repr__"], "tokens": 172}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def annotated_getattr(obj, name, ann):\n try:\n obj = getattr(obj, name)\n except AttributeError:\n raise AttributeError(\n \"%r object at %s has no attribute %r\" % (type(obj).__name__, ann, name)\n )\n return obj\n\n\ndef derive_importpath(import_path, raising):\n if not isinstance(import_path, six.string_types) or \".\" not in import_path:\n raise TypeError(\"must be absolute import path string, not %r\" % (import_path,))\n module, attr = import_path.rsplit(\".\", 1)\n target = resolve(module)\n if raising:\n annotated_getattr(target, attr, ann=module)\n return attr, target\n\n\nclass Notset(object):\n def __repr__(self):\n return \"\"\n\n\nnotset = Notset()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch_MonkeyPatch.context.try_.finally_.m_undo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch_MonkeyPatch.context.try_.finally_.m_undo_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 102, "end_line": 133, "span_ids": ["MonkeyPatch", "MonkeyPatch.context"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n \"\"\" Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.\n \"\"\"\n\n def __init__(self):\n self._setattr = []\n self._setitem = []\n self._cwd = None\n self._savesyspath = None\n\n @contextmanager\n def context(self):\n \"\"\"\n Context manager that returns a new :class:`MonkeyPatch` object which\n undoes any patching done inside the ``with`` block upon exit:\n\n .. 
code-block:: python\n\n import functools\n def test_partial(monkeypatch):\n with monkeypatch.context() as m:\n m.setattr(functools, \"partial\", 3)\n\n Useful in situations where it is desired to undo some patches before the test ends,\n such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples\n of this see `#3290 `_.\n \"\"\"\n m = MonkeyPatch()\n try:\n yield m\n finally:\n m.undo()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setattr_MonkeyPatch.setattr.setattr_target_name_val": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setattr_MonkeyPatch.setattr.setattr_target_name_val", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 135, "end_line": 170, "span_ids": ["MonkeyPatch.setattr"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def setattr(self, target, name, value=notset, raising=True):\n \"\"\" Set attribute value on target, memorizing the old value.\n By default raise AttributeError if the attribute did not exist.\n\n For convenience you can specify a string as ``target`` which\n will be interpreted as a dotted import path, with the last part\n being the attribute name. 
Example:\n ``monkeypatch.setattr(\"os.getcwd\", lambda: \"/\")``\n would set the ``getcwd`` function of the ``os`` module.\n\n The ``raising`` value determines if the setattr should fail\n if the attribute is not already present (defaults to True\n which means it will raise).\n \"\"\"\n __tracebackhide__ = True\n import inspect\n\n if value is notset:\n if not isinstance(target, six.string_types):\n raise TypeError(\n \"use setattr(target, name, value) or \"\n \"setattr(target, value) with target being a dotted \"\n \"import string\"\n )\n value = name\n name, target = derive_importpath(target, raising)\n\n oldval = getattr(target, name, notset)\n if raising and oldval is notset:\n raise AttributeError(\"%r has no attribute %r\" % (target, name))\n\n # avoid class descriptors like staticmethod/classmethod\n if inspect.isclass(target):\n oldval = target.__dict__.get(name, notset)\n self._setattr.append((target, name, oldval))\n setattr(target, name, value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.delattr_MonkeyPatch.delattr.if_not_hasattr_target_na.else_.delattr_target_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.delattr_MonkeyPatch.delattr.if_not_hasattr_target_na.else_.delattr_target_name_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 172, "end_line": 204, "span_ids": ["MonkeyPatch.delattr"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def delattr(self, target, name=notset, raising=True):\n \"\"\" Delete attribute ``name`` from ``target``, by default raise\n AttributeError it the attribute did not previously exist.\n\n If no ``name`` is specified and ``target`` is a string\n it will be interpreted as a dotted import path with the\n last part being the attribute name.\n\n If ``raising`` is set to False, no exception will be raised if the\n attribute is missing.\n \"\"\"\n __tracebackhide__ = True\n import inspect\n\n if name is notset:\n if not isinstance(target, six.string_types):\n raise TypeError(\n \"use delattr(target, name) or \"\n \"delattr(target) with target being a dotted \"\n \"import string\"\n )\n name, target = derive_importpath(target, raising)\n\n if not hasattr(target, name):\n if raising:\n raise AttributeError(name)\n else:\n oldval = getattr(target, name, notset)\n # Avoid class descriptors like staticmethod/classmethod.\n if inspect.isclass(target):\n oldval = target.__dict__.get(name, notset)\n self._setattr.append((target, name, oldval))\n delattr(target, name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setitem_MonkeyPatch._warn_if_env_name_is_not_str.if_six_PY2_and_not_isinst.warnings_warn_": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setitem_MonkeyPatch._warn_if_env_name_is_not_str.if_six_PY2_and_not_isinst.warnings_warn_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 206, "end_line": 231, "span_ids": ["MonkeyPatch._warn_if_env_name_is_not_str", "MonkeyPatch.delitem", "MonkeyPatch.setitem"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def setitem(self, dic, name, value):\n \"\"\" Set dictionary entry ``name`` to value. \"\"\"\n self._setitem.append((dic, name, dic.get(name, notset)))\n dic[name] = value\n\n def delitem(self, dic, name, raising=True):\n \"\"\" Delete ``name`` from dict. Raise KeyError if it doesn't exist.\n\n If ``raising`` is set to False, no exception will be raised if the\n key is missing.\n \"\"\"\n if name not in dic:\n if raising:\n raise KeyError(name)\n else:\n self._setitem.append((dic, name, dic.get(name, notset)))\n del dic[name]\n\n def _warn_if_env_name_is_not_str(self, name):\n \"\"\"On Python 2, warn if the given environment variable name is not a native str (#4056)\"\"\"\n if six.PY2 and not isinstance(name, str):\n warnings.warn(\n pytest.PytestWarning(\n \"Environment variable name {!r} should be str\".format(name)\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setenv_MonkeyPatch.delenv.self_delitem_os_environ_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.setenv_MonkeyPatch.delenv.self_delitem_os_environ_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 233, "end_line": 261, "span_ids": ["MonkeyPatch.setenv", "MonkeyPatch.delenv"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def setenv(self, name, value, prepend=None):\n \"\"\" Set environment variable ``name`` to ``value``. 
If ``prepend``\n is a character, read the current environment variable value\n and prepend the ``value`` adjoined with the ``prepend`` character.\"\"\"\n if not isinstance(value, str):\n warnings.warn(\n pytest.PytestWarning(\n \"Value of environment variable {name} type should be str, but got \"\n \"{value!r} (type: {type}); converted to str implicitly\".format(\n name=name, value=value, type=type(value).__name__\n )\n ),\n stacklevel=2,\n )\n value = str(value)\n if prepend and name in os.environ:\n value = value + prepend + os.environ[name]\n self._warn_if_env_name_is_not_str(name)\n self.setitem(os.environ, name, value)\n\n def delenv(self, name, raising=True):\n \"\"\" Delete ``name`` from the environment. Raise KeyError if it does\n not exist.\n\n If ``raising`` is set to False, no exception will be raised if the\n environment variable is missing.\n \"\"\"\n self._warn_if_env_name_is_not_str(name)\n self.delitem(os.environ, name, raising=raising)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.syspath_prepend_MonkeyPatch.chdir.if_hasattr_path_chdir_.else_.os_chdir_path_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.syspath_prepend_MonkeyPatch.chdir.if_hasattr_path_chdir_.else_.os_chdir_path_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 263, "end_line": 298, "span_ids": ["MonkeyPatch.syspath_prepend", "MonkeyPatch.chdir"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def syspath_prepend(self, path):\n \"\"\" Prepend ``path`` to ``sys.path`` list of import locations. 
\"\"\"\n from pkg_resources import fixup_namespace_packages\n\n if self._savesyspath is None:\n self._savesyspath = sys.path[:]\n sys.path.insert(0, str(path))\n\n # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171\n fixup_namespace_packages(str(path))\n\n # A call to syspathinsert() usually means that the caller wants to\n # import some dynamically created files, thus with python3 we\n # invalidate its import caches.\n # This is especially important when any namespace package is in used,\n # since then the mtime based FileFinder cache (that gets created in\n # this case already) gets not invalidated when writing the new files\n # quickly afterwards.\n if sys.version_info >= (3, 3):\n from importlib import invalidate_caches\n\n invalidate_caches()\n\n def chdir(self, path):\n \"\"\" Change the current working directory to the specified path.\n Path can be a string or a py.path.local object.\n \"\"\"\n if self._cwd is None:\n self._cwd = os.getcwd()\n if hasattr(path, \"chdir\"):\n path.chdir()\n elif isinstance(path, Path):\n # modern python uses the fspath protocol here LEGACY\n os.chdir(str(path))\n else:\n os.chdir(path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.undo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/monkeypatch.py_MonkeyPatch.undo_", "embedding": null, "metadata": {"file_path": "src/_pytest/monkeypatch.py", "file_name": "monkeypatch.py", "file_type": "text/x-python", "category": "implementation", "start_line": 300, "end_line": 336, "span_ids": ["MonkeyPatch.undo"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MonkeyPatch(object):\n\n def undo(self):\n \"\"\" Undo previous changes. This call consumes the\n undo stack. Calling it a second time has no effect unless\n you do more monkeypatching after the undo call.\n\n There is generally no need to call `undo()`, since it is\n called automatically during tear-down.\n\n Note that the same `monkeypatch` fixture is used across a\n single test function invocation. 
If `monkeypatch` is used both by\n the test function itself and one of the test fixtures,\n calling `undo()` will undo all of the changes made in\n both functions.\n \"\"\"\n for obj, name, value in reversed(self._setattr):\n if value is not notset:\n setattr(obj, name, value)\n else:\n delattr(obj, name)\n self._setattr[:] = []\n for dictionary, name, value in reversed(self._setitem):\n if value is notset:\n try:\n del dictionary[name]\n except KeyError:\n pass # was already deleted, so we have the desired state\n else:\n dictionary[name] = value\n self._setitem[:] = []\n if self._savesyspath is not None:\n sys.path[:] = self._savesyspath\n self._savesyspath = None\n\n if self._cwd is not None:\n os.chdir(self._cwd)\n self._cwd = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_from___future___import_ab__splitnode.return.parts": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_from___future___import_ab__splitnode.return.parts", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["imports", "_splitnode", "impl"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport warnings\n\nimport py\nimport six\n\nimport _pytest._code\nfrom _pytest.compat import getfslineno\nfrom _pytest.mark.structures import NodeKeywords\nfrom _pytest.outcomes import fail\n\nSEP = \"/\"\n\ntracebackcutdir = py.path.local(_pytest.__file__).dirpath()\n\n\ndef _splitnode(nodeid):\n \"\"\"Split a nodeid into constituent 'parts'.\n\n Node IDs are strings, and can be things like:\n ''\n 'testing/code'\n 'testing/code/test_excinfo.py'\n 'testing/code/test_excinfo.py::TestFormattedExcinfo'\n\n Return values are lists e.g.\n []\n ['testing', 'code']\n ['testing', 'code', 'test_excinfo.py']\n ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']\n \"\"\"\n if nodeid == \"\":\n # If there is no root node at all, return an empty list so the caller's logic can remain sane\n return []\n parts = nodeid.split(SEP)\n # Replace single last element 'test_foo.py::Bar' with multiple elements 'test_foo.py', 'Bar'\n parts[-1:] = parts[-1].split(\"::\")\n return parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_ischildnode_ischildnode.return.node_parts_len_base_par": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_ischildnode_ischildnode.return.node_parts_len_base_par", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 45, "end_line": 54, "span_ids": ["ischildnode"], "tokens": 114}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ischildnode(baseid, nodeid):\n \"\"\"Return True if the nodeid is a child node of the baseid.\n\n E.g. 'foo/bar::Baz' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'\n \"\"\"\n base_parts = _splitnode(baseid)\n node_parts = _splitnode(nodeid)\n if len(node_parts) < len(base_parts):\n return False\n return node_parts[: len(base_parts)] == base_parts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node_Node.__repr__.return._s_s_self___class": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node_Node.__repr__.return._s_s_self___class", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 57, "end_line": 105, "span_ids": ["Node.ihook", "Node", "Node.__repr__"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n \"\"\" base class for Collector and Item the test collection tree.\n Collector subclasses have children, Items are terminal nodes.\"\"\"\n\n def __init__(\n self, name, parent=None, config=None, session=None, fspath=None, nodeid=None\n ):\n #: a unique name within the scope of the parent node\n self.name = name\n\n #: the parent collector node.\n self.parent = parent\n\n #: the pytest config object\n self.config = config or parent.config\n\n #: the session this node is part of\n self.session = session or parent.session\n\n #: filesystem path where this node was collected from (can be None)\n self.fspath = fspath or getattr(parent, \"fspath\", None)\n\n #: keywords/markers collected from all scopes\n self.keywords = NodeKeywords(self)\n\n #: the marker objects belonging to this node\n self.own_markers = []\n\n #: allow adding of extra keywords to use for matching\n self.extra_keyword_matches = set()\n\n # used for storing artificial fixturedefs for direct parametrization\n self._name2pseudofixturedef = {}\n\n if nodeid is not None:\n assert \"::()\" not in nodeid\n self._nodeid = nodeid\n else:\n self._nodeid = self.parent.nodeid\n if self.name != \"()\":\n self._nodeid += \"::\" + self.name\n\n @property\n def ihook(self):\n \"\"\" fspath sensitive hook proxy used to call pytest hooks\"\"\"\n return self.session.gethookproxy(self.fspath)\n\n def __repr__(self):\n return \"<%s %s>\" % (self.__class__.__name__, getattr(self, \"name\", None))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.warn_Node.warn.warnings_warn_explicit_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.warn_Node.warn.warnings_warn_explicit_", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 107, "end_line": 137, "span_ids": ["Node.warn"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n\n def warn(self, warning):\n \"\"\"Issue a warning for this item.\n\n Warnings will be displayed after the test session, unless explicitly suppressed\n\n :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.\n\n :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.\n\n Example usage:\n\n .. code-block:: python\n\n node.warn(PytestWarning(\"some message\"))\n\n \"\"\"\n from _pytest.warning_types import PytestWarning\n\n if not isinstance(warning, PytestWarning):\n raise ValueError(\n \"warning must be an instance of PytestWarning or subclass, got {!r}\".format(\n warning\n )\n )\n path, lineno = get_fslocation_from_item(self)\n warnings.warn_explicit(\n warning,\n category=None,\n filename=str(path),\n lineno=lineno + 1 if lineno is not None else None,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._methods_for_ordering_no_Node.listchain.return.chain": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._methods_for_ordering_no_Node.listchain.return.chain", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 139, "end_line": 163, "span_ids": ["Node.teardown", "Node.listchain", "Node.setup", "Node.warn", "Node.__hash__", "Node.nodeid"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n\n # methods for ordering nodes\n @property\n def nodeid(self):\n \"\"\" a ::-separated string denoting its collection tree address. \"\"\"\n return self._nodeid\n\n def __hash__(self):\n return hash(self.nodeid)\n\n def setup(self):\n pass\n\n def teardown(self):\n pass\n\n def listchain(self):\n \"\"\" return list of all parent collectors up to self,\n starting from root of collection tree. 
\"\"\"\n chain = []\n item = self\n while item is not None:\n chain.append(item)\n item = item.parent\n chain.reverse()\n return chain", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.add_marker_Node.add_marker.if_append_.else_.self_own_markers_insert_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.add_marker_Node.add_marker.if_append_.else_.self_own_markers_insert_0", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 165, "end_line": 183, "span_ids": ["Node.add_marker"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n\n def add_marker(self, marker, append=True):\n \"\"\"dynamically add a marker object to the node.\n\n :type marker: ``str`` or ``pytest.mark.*`` object\n :param marker:\n ``append=True`` whether to append the marker,\n if ``False`` insert at position ``0``.\n \"\"\"\n from _pytest.mark import MarkDecorator, MARK_GEN\n\n if isinstance(marker, six.string_types):\n marker = getattr(MARK_GEN, marker)\n elif not isinstance(marker, MarkDecorator):\n raise ValueError(\"is not a string or pytest.mark.* Marker\")\n self.keywords[marker.name] = marker\n if append:\n self.own_markers.append(marker.mark)\n else:\n self.own_markers.insert(0, marker.mark)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.iter_markers_Node._prunetraceback.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node.iter_markers_Node._prunetraceback.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 185, "end_line": 241, "span_ids": ["Node.listextrakeywords", "Node._prunetraceback", "Node.iter_markers", "Node.addfinalizer", "Node.get_closest_marker", "Node.iter_markers_with_node", "Node.listnames", "Node.getparent"], "tokens": 432}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n\n def iter_markers(self, name=None):\n \"\"\"\n :param name: if given, filter the results by the name attribute\n\n iterate over all markers of the node\n \"\"\"\n return (x[1] for x in self.iter_markers_with_node(name=name))\n\n def iter_markers_with_node(self, name=None):\n \"\"\"\n :param name: if given, filter the results by the name attribute\n\n iterate over all markers of the node\n returns sequence of tuples (node, mark)\n \"\"\"\n for node in reversed(self.listchain()):\n for mark in 
node.own_markers:\n if name is None or getattr(mark, \"name\", None) == name:\n yield node, mark\n\n def get_closest_marker(self, name, default=None):\n \"\"\"return the first marker matching the name, from closest (for example function) to farther level (for example\n module level).\n\n :param default: fallback return value of no marker was found\n :param name: name to filter by\n \"\"\"\n return next(self.iter_markers(name=name), default)\n\n def listextrakeywords(self):\n \"\"\" Return a set of all extra keywords in self and any parents.\"\"\"\n extra_keywords = set()\n for item in self.listchain():\n extra_keywords.update(item.extra_keyword_matches)\n return extra_keywords\n\n def listnames(self):\n return [x.name for x in self.listchain()]\n\n def addfinalizer(self, fin):\n \"\"\" register a function to be called when this node is finalized.\n\n This method can only be called when this node is active\n in a setup chain, for example during self.setup().\n \"\"\"\n self.session._setupstate.addfinalizer(fin, self)\n\n def getparent(self, cls):\n \"\"\" get the next parent node (including ourself)\n which is an instance of the given class\"\"\"\n current = self\n while current and not isinstance(current, cls):\n current = current.parent\n return current\n\n def _prunetraceback(self, excinfo):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._repr_failure_py_Node.repr_failure._repr_failure_py": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Node._repr_failure_py_Node.repr_failure._repr_failure_py", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 243, "end_line": 288, "span_ids": ["Node:3", "Node._repr_failure_py"], "tokens": 352}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Node(object):\n\n def _repr_failure_py(self, excinfo, style=None):\n if excinfo.errisinstance(fail.Exception):\n if not excinfo.value.pytrace:\n return six.text_type(excinfo.value)\n fm = self.session._fixturemanager\n if excinfo.errisinstance(fm.FixtureLookupError):\n return excinfo.value.formatrepr()\n tbfilter = True\n if self.config.getoption(\"fulltrace\", False):\n style = \"long\"\n else:\n tb = _pytest._code.Traceback([excinfo.traceback[-1]])\n self._prunetraceback(excinfo)\n if len(excinfo.traceback) == 0:\n excinfo.traceback = tb\n tbfilter = False # prunetraceback already does it\n if style == \"auto\":\n style = \"long\"\n # XXX should excinfo.getrepr record all data and toterminal() process it?\n if style is None:\n if self.config.getoption(\"tbstyle\", \"auto\") == \"short\":\n style = \"short\"\n else:\n style = \"long\"\n\n if self.config.getoption(\"verbose\", 0) > 1:\n truncate_locals = False\n else:\n truncate_locals = True\n\n try:\n os.getcwd()\n abspath = False\n except OSError:\n abspath = True\n\n return excinfo.getrepr(\n funcargs=True,\n abspath=abspath,\n showlocals=self.config.getoption(\"showlocals\", False),\n style=style,\n tbfilter=tbfilter,\n 
truncate_locals=truncate_locals,\n )\n\n repr_failure = _repr_failure_py", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_get_fslocation_from_item_get_fslocation_from_item.return.getattr_item_fspath_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_get_fslocation_from_item_get_fslocation_from_item.return.getattr_item_fspath_", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 291, "end_line": 306, "span_ids": ["get_fslocation_from_item"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def get_fslocation_from_item(item):\n \"\"\"Tries to extract the actual location from an item, depending on available attributes:\n\n * \"fslocation\": a pair (path, lineno)\n * \"obj\": a Python object that the item wraps.\n * \"fspath\": just a path\n\n :rtype: a tuple of (str|LocalPath, int) with filename and line number.\n \"\"\"\n result = getattr(item, \"location\", None)\n if result is not None:\n return result[:2]\n obj = getattr(item, \"obj\", None)\n if obj is not None:\n return getfslineno(obj)\n return getattr(item, \"fspath\", \"unknown location\"), -1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Collector_Collector._prunetraceback.if_hasattr_self_fspath_.excinfo.traceback.ntraceback_filter_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Collector_Collector._prunetraceback.if_hasattr_self_fspath_.excinfo.traceback.ntraceback_filter_", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 309, "end_line": 343, "span_ids": ["Collector._prunetraceback", "Collector.repr_failure", "Collector.CollectError", "Collector", "Collector.collect"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Collector(Node):\n \"\"\" Collector instances create children through collect()\n and thus iteratively build a tree.\n \"\"\"\n\n class CollectError(Exception):\n \"\"\" an error during collection, contains a custom message. \"\"\"\n\n def collect(self):\n \"\"\" returns a list of children (items and collectors)\n for this collection node.\n \"\"\"\n raise NotImplementedError(\"abstract\")\n\n def repr_failure(self, excinfo):\n \"\"\" represent a collection failure. 
\"\"\"\n if excinfo.errisinstance(self.CollectError):\n exc = excinfo.value\n return str(exc.args[0])\n\n # Respect explicit tbstyle option, but default to \"short\"\n # (None._repr_failure_py defaults to \"long\" without \"fulltrace\" option).\n tbstyle = self.config.getoption(\"tbstyle\")\n if tbstyle == \"auto\":\n tbstyle = \"short\"\n\n return self._repr_failure_py(excinfo, style=tbstyle)\n\n def _prunetraceback(self, excinfo):\n if hasattr(self, \"fspath\"):\n traceback = excinfo.traceback\n ntraceback = traceback.cut(path=self.fspath)\n if ntraceback == traceback:\n ntraceback = ntraceback.cut(excludepath=tracebackcutdir)\n excinfo.traceback = ntraceback.filter()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py__check_initialpaths_for_relpath_FSCollector.__init__.super_FSCollector_self_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py__check_initialpaths_for_relpath_FSCollector.__init__.super_FSCollector_self_", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 375, "span_ids": ["_check_initialpaths_for_relpath", "FSCollector"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _check_initialpaths_for_relpath(session, fspath):\n for initial_path in session._initialpaths:\n if fspath.common(initial_path) == initial_path:\n return fspath.relto(initial_path)\n\n\nclass FSCollector(Collector):\n def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):\n fspath = py.path.local(fspath) # xxx only for test_resultlog.py?\n name = fspath.basename\n if parent is not None:\n rel = fspath.relto(parent.fspath)\n if rel:\n name = rel\n name = name.replace(os.sep, SEP)\n self.fspath = fspath\n\n session = session or parent.session\n\n if nodeid is None:\n nodeid = self.fspath.relto(session.config.rootdir)\n\n if not nodeid:\n nodeid = _check_initialpaths_for_relpath(session, fspath)\n if nodeid and os.sep != SEP:\n nodeid = nodeid.replace(os.sep, SEP)\n\n super(FSCollector, self).__init__(\n name, parent, config, session, nodeid=nodeid, fspath=fspath\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_File_Item.__init__.self.user_properties._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_File_Item.__init__.self.user_properties._", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 378, "end_line": 395, "span_ids": ["Item", "File"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class File(FSCollector):\n \"\"\" base class for collecting tests from a file. \"\"\"\n\n\nclass Item(Node):\n \"\"\" a basic test invocation item. Note that for a single function\n there might be multiple test invocation items.\n \"\"\"\n\n nextitem = None\n\n def __init__(self, name, parent=None, config=None, session=None, nodeid=None):\n super(Item, self).__init__(name, parent, config, session, nodeid=nodeid)\n self._report_sections = []\n\n #: user properties is a list of tuples (name, value) that holds user\n #: defined properties for this test.\n self.user_properties = []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Item.add_report_section_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nodes.py_Item.add_report_section_", "embedding": null, "metadata": {"file_path": "src/_pytest/nodes.py", "file_name": "nodes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 397, "end_line": 429, "span_ids": ["Item.location", "Item.reportinfo", "Item.add_report_section"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Item(Node):\n\n def add_report_section(self, when, key, content):\n \"\"\"\n Adds a new report section, similar to what's done internally to add stdout and\n stderr captured output::\n\n item.add_report_section(\"call\", \"stdout\", \"report section contents\")\n\n :param str when:\n One of the possible capture states, ``\"setup\"``, ``\"call\"``, ``\"teardown\"``.\n :param str key:\n Name of the section, can be customized at will. 
Pytest uses ``\"stdout\"`` and\n ``\"stderr\"`` internally.\n\n :param str content:\n The full contents as a string.\n \"\"\"\n if content:\n self._report_sections.append((when, key, content))\n\n def reportinfo(self):\n return self.fspath, None, \"\"\n\n @property\n def location(self):\n try:\n return self._location\n except AttributeError:\n location = self.reportinfo()\n fspath = self.session._node_location_to_relpath(location[0])\n location = (fspath, location[1], str(location[2]))\n self._location = location\n return location", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nose.py__run_test_suites_writt_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/nose.py__run_test_suites_writt_", "embedding": null, "metadata": {"file_path": "src/_pytest/nose.py", "file_name": "nose.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 70, "span_ids": ["call_optional", "docstring", "pytest_runtest_makereport", "get_skip_exceptions", "teardown_nose", "is_potential_nosetest", "imports", "pytest_runtest_setup"], "tokens": 508}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" run test suites written for nose. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport six\n\nimport pytest\nfrom _pytest import python\nfrom _pytest import runner\nfrom _pytest import unittest\nfrom _pytest.config import hookimpl\n\n\ndef get_skip_exceptions():\n skip_classes = set()\n for module_name in (\"unittest\", \"unittest2\", \"nose\"):\n mod = sys.modules.get(module_name)\n if hasattr(mod, \"SkipTest\"):\n skip_classes.add(mod.SkipTest)\n return tuple(skip_classes)\n\n\ndef pytest_runtest_makereport(item, call):\n if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):\n # let's substitute the excinfo with a pytest.skip one\n call2 = runner.CallInfo.from_call(\n lambda: pytest.skip(six.text_type(call.excinfo.value)), call.when\n )\n call.excinfo = call2.excinfo\n\n\n@hookimpl(trylast=True)\ndef pytest_runtest_setup(item):\n if is_potential_nosetest(item):\n if not call_optional(item.obj, \"setup\"):\n # call module level setup if there is no object level one\n call_optional(item.parent.obj, \"setup\")\n # XXX this implies we only call teardown when setup worked\n item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)\n\n\ndef teardown_nose(item):\n if is_potential_nosetest(item):\n if not call_optional(item.obj, \"teardown\"):\n call_optional(item.parent.obj, \"teardown\")\n # if hasattr(item.parent, '_nosegensetup'):\n # #call_optional(item._nosegensetup, 'teardown')\n # del item.parent._nosegensetup\n\n\ndef is_potential_nosetest(item):\n # extra check needed since we do not do nose style setup/teardown\n # on direct unittest style classes\n return isinstance(item, python.Function) and not isinstance(\n item, unittest.TestCaseFunction\n )\n\n\ndef call_optional(obj, name):\n method = getattr(obj, name, None)\n isfixture = hasattr(method, 
\"_pytestfixturefunction\")\n if method is not None and not isfixture and callable(method):\n # If there's any problems allow the exception to raise rather than\n # silently ignoring them\n method()\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py___OutcomeException.__str__.__repr__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py___OutcomeException.__str__.__repr__", "embedding": null, "metadata": {"file_path": "src/_pytest/outcomes.py", "file_name": "outcomes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["OutcomeException.__repr__", "docstring", "OutcomeException", "OutcomeException:3", "imports"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nexception classes and constants handling test outcomes\nas well as functions creating them\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\n\nclass OutcomeException(BaseException):\n \"\"\" OutcomeException and its subclass instances indicate and\n contain info about test and collection outcomes.\n \"\"\"\n\n def __init__(self, msg=None, pytrace=True):\n BaseException.__init__(self, msg)\n self.msg = msg\n self.pytrace = pytrace\n\n def __repr__(self):\n if self.msg:\n val = self.msg\n if isinstance(val, bytes):\n val = val.decode(\"UTF-8\", errors=\"replace\")\n return val\n return \"<%s instance>\" % (self.__class__.__name__,)\n\n __str__ = __repr__", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_TEST_OUTCOME_exit.Exception.Exit": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_TEST_OUTCOME_exit.Exception.Exit", "embedding": null, "metadata": {"file_path": "src/_pytest/outcomes.py", "file_name": "outcomes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 75, "span_ids": ["impl", "Failed", "exit", "Skipped", "Exit", "impl:3"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "TEST_OUTCOME = (OutcomeException, Exception)\n\n\nclass Skipped(OutcomeException):\n # XXX hackish: on 3k we fake to live in the builtins\n # in order to have Skipped exception printing shorter/nicer\n __module__ = \"builtins\"\n\n def __init__(self, msg=None, pytrace=True, allow_module_level=False):\n OutcomeException.__init__(self, msg=msg, pytrace=pytrace)\n self.allow_module_level = allow_module_level\n\n\nclass Failed(OutcomeException):\n \"\"\" raised from an explicit call to pytest.fail() \"\"\"\n\n 
__module__ = \"builtins\"\n\n\nclass Exit(Exception):\n \"\"\" raised for immediate program exits (no tracebacks/summaries)\"\"\"\n\n def __init__(self, msg=\"unknown reason\", returncode=None):\n self.msg = msg\n self.returncode = returncode\n super(Exit, self).__init__(msg)\n\n\n# exposed helper methods\n\n\ndef exit(msg, returncode=None):\n \"\"\"\n Exit testing process.\n\n :param str msg: message to display upon exit.\n :param int returncode: return code to be used when exiting pytest.\n \"\"\"\n __tracebackhide__ = True\n raise Exit(msg, returncode)\n\n\nexit.Exception = Exit", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip_skip.raise_Skipped_msg_msg_al": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip_skip.raise_Skipped_msg_msg_al", "embedding": null, "metadata": {"file_path": "src/_pytest/outcomes.py", "file_name": "outcomes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 101, "span_ids": ["skip"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def skip(msg=\"\", **kwargs):\n \"\"\"\n Skip an executing test with the given message.\n\n This function should be called only during testing (setup, call or teardown) or\n during collection by using the ``allow_module_level`` flag. This function can\n be called in doctests as well.\n\n :kwarg bool allow_module_level: allows this function to be called at\n module level, skipping the rest of the module. Default to False.\n\n .. 
note::\n It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be\n skipped under certain conditions like mismatching platforms or\n dependencies.\n Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP\n `_)\n to skip a doctest statically.\n \"\"\"\n __tracebackhide__ = True\n allow_module_level = kwargs.pop(\"allow_module_level\", False)\n if kwargs:\n raise TypeError(\"unexpected keyword arguments: {}\".format(sorted(kwargs)))\n raise Skipped(msg=msg, allow_module_level=allow_module_level)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip.Exception_xfail.Exception.XFailed": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_skip.Exception_xfail.Exception.XFailed", "embedding": null, "metadata": {"file_path": "src/_pytest/outcomes.py", "file_name": "outcomes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 104, "end_line": 140, "span_ids": ["fail", "XFailed", "impl:9", "impl:7", "impl:5", "xfail"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "skip.Exception = Skipped\n\n\ndef fail(msg=\"\", pytrace=True):\n \"\"\"\n Explicitly fail an executing test with the given message.\n\n :param str msg: the message to show the user as reason for the failure.\n :param bool pytrace: if false the msg represents the full failure information and no\n python traceback will be reported.\n \"\"\"\n __tracebackhide__ = True\n raise Failed(msg=msg, pytrace=pytrace)\n\n\nfail.Exception = Failed\n\n\nclass XFailed(fail.Exception):\n \"\"\" raised from an explicit call to pytest.xfail() \"\"\"\n\n\ndef xfail(reason=\"\"):\n \"\"\"\n Imperatively xfail an executing test or setup functions with the given reason.\n\n This function should be called only during testing (setup, call or teardown).\n\n .. 
note::\n It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be\n xfailed under certain conditions like known bugs or missing features.\n \"\"\"\n __tracebackhide__ = True\n raise XFailed(reason)\n\n\nxfail.Exception = XFailed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_importorskip_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/outcomes.py_importorskip_", "embedding": null, "metadata": {"file_path": "src/_pytest/outcomes.py", "file_name": "outcomes.py", "file_type": "text/x-python", "category": "implementation", "start_line": 143, "end_line": 193, "span_ids": ["importorskip"], "tokens": 436}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def importorskip(modname, minversion=None, reason=None):\n \"\"\"Imports and returns the requested module ``modname``, or skip the current test\n if the module cannot be imported.\n\n :param str modname: the name of the module to import\n :param str minversion: if given, the imported module ``__version__`` attribute must be\n at least this minimal version, otherwise the test is still skipped.\n :param str reason: if given, this reason is shown as the message when the module\n cannot be imported.\n \"\"\"\n import warnings\n\n __tracebackhide__ = True\n compile(modname, \"\", \"eval\") # to catch syntaxerrors\n should_skip = False\n\n with warnings.catch_warnings():\n # make sure to ignore ImportWarnings that might happen because\n # of existing directories with the same name we're trying to\n # import but without a __init__.py file\n warnings.simplefilter(\"ignore\")\n try:\n __import__(modname)\n except ImportError:\n # Do not raise chained exception here(#1485)\n should_skip = True\n if should_skip:\n if reason is None:\n reason = \"could not import %r\" % (modname,)\n raise Skipped(reason, allow_module_level=True)\n mod = sys.modules[modname]\n if minversion is None:\n return mod\n verattr = getattr(mod, \"__version__\", None)\n if minversion is not None:\n try:\n from pkg_resources import parse_version as pv\n except ImportError:\n raise Skipped(\n \"we have a required version for %r but can not import \"\n \"pkg_resources to parse version strings.\" % (modname,),\n allow_module_level=True,\n )\n if verattr is None or pv(verattr) < pv(minversion):\n raise Skipped(\n \"module %r has __version__ %r, required is: %r\"\n % (modname, verattr, minversion),\n allow_module_level=True,\n )\n return mod", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py__submit_failure_or_tes_pytest_addoption.group__addoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py__submit_failure_or_tes_pytest_addoption.group__addoption_", "embedding": null, "metadata": {"file_path": "src/_pytest/pastebin.py", "file_name": "pastebin.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 1, "end_line": 24, "span_ids": ["imports", "pytest_addoption", "docstring"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" submit failure or test session information to a pastebin service. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport tempfile\n\nimport six\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"terminal reporting\")\n group._addoption(\n \"--pastebin\",\n metavar=\"mode\",\n action=\"store\",\n dest=\"pastebin\",\n default=None,\n choices=[\"failed\", \"all\"],\n help=\"send failed|all info to bpaste.net pastebin service.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_configure_pytest_configure.if_config_option_pastebin.if_tr_is_not_None_.tr._tw.write.tee_write": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_configure_pytest_configure.if_config_option_pastebin.if_tr_is_not_None_.tr._tw.write.tee_write", "embedding": null, "metadata": {"file_path": "src/_pytest/pastebin.py", "file_name": "pastebin.py", "file_type": "text/x-python", "category": "implementation", "start_line": 27, "end_line": 45, "span_ids": ["pytest_configure"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(trylast=True)\ndef pytest_configure(config):\n if config.option.pastebin == \"all\":\n tr = config.pluginmanager.getplugin(\"terminalreporter\")\n # if no terminal reporter plugin is present, nothing we can do here;\n # this can happen when this function executes in a slave node\n # when using pytest-xdist, for example\n if tr is not None:\n # pastebin file will be utf-8 encoded binary file\n config._pastebinfile = tempfile.TemporaryFile(\"w+b\")\n oldwrite = tr._tw.write\n\n def tee_write(s, **kwargs):\n oldwrite(s, **kwargs)\n if isinstance(s, six.text_type):\n s = s.encode(\"utf-8\")\n config._pastebinfile.write(s)\n\n tr._tw.write = tee_write", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_unconfigure_pytest_unconfigure.if_hasattr_config__past.tr_write_line_pastebin_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_unconfigure_pytest_unconfigure.if_hasattr_config__past.tr_write_line_pastebin_s", "embedding": null, "metadata": {"file_path": "src/_pytest/pastebin.py", "file_name": "pastebin.py", "file_type": "text/x-python", "category": "implementation", "start_line": 48, "end_line": 61, "span_ids": ["pytest_unconfigure"], 
"tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_unconfigure(config):\n if hasattr(config, \"_pastebinfile\"):\n # get terminal contents and delete file\n config._pastebinfile.seek(0)\n sessionlog = config._pastebinfile.read()\n config._pastebinfile.close()\n del config._pastebinfile\n # undo our patching in the terminal reporter\n tr = config.pluginmanager.getplugin(\"terminalreporter\")\n del tr._tw.__dict__[\"write\"]\n # write summary\n tr.write_sep(\"=\", \"Sending information to Paste Service\")\n pastebinurl = create_new_paste(sessionlog)\n tr.write_line(\"pastebin session-log: %s\\n\" % pastebinurl)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_create_new_paste_create_new_paste.if_m_.else_.return._bad_response_respon": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_create_new_paste_create_new_paste.if_m_.else_.return._bad_response_respon", "embedding": null, "metadata": {"file_path": "src/_pytest/pastebin.py", "file_name": "pastebin.py", "file_type": "text/x-python", "category": "implementation", "start_line": 64, "end_line": 90, "span_ids": ["create_new_paste"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_new_paste(contents):\n \"\"\"\n Creates a new paste using bpaste.net service.\n\n :contents: paste contents as utf-8 encoded bytes\n :returns: url to the pasted contents\n \"\"\"\n import re\n\n if sys.version_info < (3, 0):\n from urllib import urlopen, urlencode\n else:\n from urllib.request import urlopen\n from urllib.parse import urlencode\n\n params = {\n \"code\": contents,\n \"lexer\": \"python3\" if sys.version_info[0] == 3 else \"python\",\n \"expiry\": \"1week\",\n }\n url = \"https://bpaste.net\"\n response = urlopen(url, data=urlencode(params).encode(\"ascii\")).read()\n m = re.search(r'href=\"/raw/(\\w+)\"', response.decode(\"utf-8\"))\n if m:\n return \"%s/show/%s\" % (url, m.group(1))\n else:\n return \"bad response: \" + response", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_terminal_summary_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pastebin.py_pytest_terminal_summary_", "embedding": null, "metadata": {"file_path": "src/_pytest/pastebin.py", "file_name": "pastebin.py", "file_type": "text/x-python", "category": "implementation", "start_line": 93, "end_line": 114, "span_ids": ["pytest_terminal_summary"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_terminal_summary(terminalreporter):\n import _pytest.config\n\n if terminalreporter.config.option.pastebin != \"failed\":\n return\n tr = terminalreporter\n if \"failed\" in tr.stats:\n terminalreporter.write_sep(\"=\", \"Sending information to Paste Service\")\n for rep in terminalreporter.stats.get(\"failed\"):\n try:\n msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc\n except AttributeError:\n msg = tr._getfailureheadline(rep)\n tw = _pytest.config.create_terminal_writer(\n terminalreporter.config, stringio=True\n )\n rep.toterminal(tw)\n s = tw.stringio.getvalue()\n assert len(s)\n pastebinurl = create_new_paste(s)\n tr.write_line(\"%s --> %s\" % (msg, pastebinurl))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_atexit_if_six_PY2_.else_._max.max": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_atexit_if_six_PY2_.else_._max.max", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 94, "span_ids": ["impl", "ensure_reset_dir", "rmtree", "imports:20", "impl:2", "imports:19", "find_prefixed", "find_suffixes", "impl:3", "extract_suffixes", "imports", "impl:9", "parse_num"], "tokens": 489}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import atexit\nimport errno\nimport fnmatch\nimport itertools\nimport operator\nimport os\nimport shutil\nimport sys\nimport uuid\nfrom functools import reduce\nfrom os.path import expanduser\nfrom os.path import expandvars\nfrom os.path import isabs\nfrom os.path import sep\nfrom posixpath import sep as posix_sep\n\nimport six\nfrom six.moves import map\n\nfrom .compat import PY36\n\nif PY36:\n from pathlib import Path, PurePath\nelse:\n from pathlib2 import Path, PurePath\n\n__all__ = [\"Path\", \"PurePath\"]\n\n\nLOCK_TIMEOUT = 60 * 60 * 3\n\nget_lock_path = operator.methodcaller(\"joinpath\", \".lock\")\n\n\ndef ensure_reset_dir(path):\n \"\"\"\n ensures the given path is an empty directory\n \"\"\"\n if path.exists():\n rmtree(path, force=True)\n path.mkdir()\n\n\ndef rmtree(path, force=False):\n if force:\n # NOTE: ignore_errors might leave dead folders around.\n # Python needs a rm -rf as a followup.\n shutil.rmtree(str(path), ignore_errors=True)\n else:\n shutil.rmtree(str(path))\n\n\ndef find_prefixed(root, prefix):\n \"\"\"finds all elements in root that begin with the prefix, case insensitive\"\"\"\n l_prefix = prefix.lower()\n for x in root.iterdir():\n if x.name.lower().startswith(l_prefix):\n yield x\n\n\ndef extract_suffixes(iter, prefix):\n \"\"\"\n :param iter: iterator over path names\n :param prefix: expected prefix of the path names\n :returns: the parts of the paths following the prefix\n \"\"\"\n p_len = len(prefix)\n for p in iter:\n yield p.name[p_len:]\n\n\ndef find_suffixes(root, prefix):\n 
\"\"\"combines find_prefixes and extract_suffixes\n \"\"\"\n return extract_suffixes(find_prefixed(root, prefix), prefix)\n\n\ndef parse_num(maybe_num):\n \"\"\"parses number path suffixes, returns -1 on error\"\"\"\n try:\n return int(maybe_num)\n except ValueError:\n return -1\n\n\nif six.PY2:\n\n def _max(iterable, default):\n \"\"\"needed due to python2.7 lacking the default argument for max\"\"\"\n return reduce(max, iterable, default)\n\n\nelse:\n _max = max", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py__force_symlink__force_symlink.None_1.except_Exception_.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py__force_symlink__force_symlink.None_1.except_Exception_.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 97, "end_line": 114, "span_ids": ["_force_symlink"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _force_symlink(root, target, link_to):\n \"\"\"helper to create the current symlink\n\n it's full of race conditions that are reasonably ok to ignore\n for the context of best effort linking to the latest testrun\n\n the presumption being thatin case of much parallelism\n the inaccuracy is going to be acceptable\n \"\"\"\n current_symlink = root.joinpath(target)\n try:\n current_symlink.unlink()\n except OSError:\n pass\n try:\n current_symlink.symlink_to(link_to)\n except Exception:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_make_numbered_dir.for_i_in_range_10_.else_.raise_EnvironmentError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_make_numbered_dir.for_i_in_range_10_.else_.raise_EnvironmentError_", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 117, "end_line": 135, "span_ids": ["make_numbered_dir"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_numbered_dir(root, prefix):\n \"\"\"create a directory with an increased number as suffix for the given prefix\"\"\"\n for i in range(10):\n # try up to 10 times to create the folder\n max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)\n new_number = max_existing + 1\n new_path = root.joinpath(\"{}{}\".format(prefix, new_number))\n try:\n new_path.mkdir()\n except Exception:\n pass\n else:\n _force_symlink(root, prefix + \"current\", 
new_path)\n return new_path\n else:\n raise EnvironmentError(\n \"could not create numbered dir with prefix \"\n \"{prefix} in {root} after 10 tries\".format(prefix=prefix, root=root)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_create_cleanup_lock_register_cleanup_lock_removal.return.register_cleanup_on_exit_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_create_cleanup_lock_register_cleanup_lock_removal.return.register_cleanup_on_exit_", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 138, "end_line": 176, "span_ids": ["create_cleanup_lock", "register_cleanup_lock_removal"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def create_cleanup_lock(p):\n \"\"\"crates a lock to prevent premature folder cleanup\"\"\"\n lock_path = get_lock_path(p)\n try:\n fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)\n except OSError as e:\n if e.errno == errno.EEXIST:\n six.raise_from(\n EnvironmentError(\"cannot create lockfile in {path}\".format(path=p)), e\n )\n else:\n raise\n else:\n pid = os.getpid()\n spid = str(pid)\n if not isinstance(spid, bytes):\n spid = spid.encode(\"ascii\")\n os.write(fd, spid)\n os.close(fd)\n if not lock_path.is_file():\n raise EnvironmentError(\"lock path got renamed after successful creation\")\n return lock_path\n\n\ndef register_cleanup_lock_removal(lock_path, register=atexit.register):\n \"\"\"registers a cleanup function for removing a lock, by default on atexit\"\"\"\n pid = os.getpid()\n\n def cleanup_on_exit(lock_path=lock_path, original_pid=pid):\n current_pid = os.getpid()\n if current_pid != original_pid:\n # fork\n return\n try:\n lock_path.unlink()\n except (OSError, IOError):\n pass\n\n return register(cleanup_on_exit)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_maybe_delete_a_numbered_dir_maybe_delete_a_numbered_dir.try_.finally_.if_lock_path_is_not_None_.try_.except_OSError_IOError_.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_maybe_delete_a_numbered_dir_maybe_delete_a_numbered_dir.try_.finally_.if_lock_path_is_not_None_.try_.except_OSError_IOError_.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 179, "end_line": 202, "span_ids": ["maybe_delete_a_numbered_dir"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def maybe_delete_a_numbered_dir(path):\n \"\"\"removes a numbered directory if its lock can be obtained and it does not seem to be in use\"\"\"\n lock_path = None\n try:\n lock_path = create_cleanup_lock(path)\n parent = path.parent\n\n garbage = parent.joinpath(\"garbage-{}\".format(uuid.uuid4()))\n path.rename(garbage)\n rmtree(garbage, force=True)\n except (OSError, EnvironmentError):\n # known races:\n # * other process did a cleanup at the same time\n # * deletable folder was found\n # * process cwd (Windows)\n return\n finally:\n # if we created the lock, ensure we remove it even if we failed\n # to properly remove the numbered dir\n if lock_path is not None:\n try:\n lock_path.unlink()\n except (OSError, IOError):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_ensure_deletable_cleanup_numbered_dir.for_path_in_root_glob_ga.try_cleanup_path_conside": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_ensure_deletable_cleanup_numbered_dir.for_path_in_root_glob_ga.try_cleanup_path_conside", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 205, "end_line": 247, "span_ids": ["try_cleanup", "cleanup_numbered_dir", "cleanup_candidates", "ensure_deletable"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def ensure_deletable(path, consider_lock_dead_if_created_before):\n \"\"\"checks if a lock exists and breaks it if its considered dead\"\"\"\n if path.is_symlink():\n return False\n lock = get_lock_path(path)\n if not lock.exists():\n return True\n try:\n lock_time = lock.stat().st_mtime\n except Exception:\n return False\n else:\n if lock_time < consider_lock_dead_if_created_before:\n lock.unlink()\n return True\n else:\n return False\n\n\ndef try_cleanup(path, consider_lock_dead_if_created_before):\n \"\"\"tries to cleanup a folder if we can ensure it's deletable\"\"\"\n if ensure_deletable(path, consider_lock_dead_if_created_before):\n maybe_delete_a_numbered_dir(path)\n\n\ndef cleanup_candidates(root, prefix, keep):\n \"\"\"lists candidates for numbered directories to be removed - follows py.path\"\"\"\n max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)\n max_delete = max_existing - keep\n paths = find_prefixed(root, prefix)\n paths, paths2 = itertools.tee(paths)\n numbers = map(parse_num, extract_suffixes(paths2, prefix))\n for path, number in zip(paths, numbers):\n if number <= max_delete:\n yield path\n\n\ndef cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before):\n \"\"\"cleanup for lock driven numbered directories\"\"\"\n for path in cleanup_candidates(root, prefix, keep):\n try_cleanup(path, consider_lock_dead_if_created_before)\n for path in root.glob(\"garbage-*\"):\n try_cleanup(path, consider_lock_dead_if_created_before)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_with_cleanup_resolve_from_str.if_isabs_input_.else_.return.root_joinpath_input_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_make_numbered_dir_with_cleanup_resolve_from_str.if_isabs_input_.else_.return.root_joinpath_input_", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 250, "end_line": 281, "span_ids": ["make_numbered_dir_with_cleanup", "resolve_from_str"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout):\n \"\"\"creates a numbered dir with a cleanup lock and removes old ones\"\"\"\n e = None\n for i in range(10):\n try:\n p = make_numbered_dir(root, prefix)\n lock_path = create_cleanup_lock(p)\n register_cleanup_lock_removal(lock_path)\n except Exception as exc:\n e = exc\n else:\n consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout\n cleanup_numbered_dir(\n root=root,\n prefix=prefix,\n keep=keep,\n consider_lock_dead_if_created_before=consider_lock_dead_if_created_before,\n )\n return p\n assert e is not None\n raise e\n\n\ndef resolve_from_str(input, root):\n assert not isinstance(input, Path), \"would break on py2\"\n root = Path(root)\n input = expanduser(input)\n input = expandvars(input)\n if isabs(input):\n return Path(input)\n else:\n return root.joinpath(input)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_fnmatch_ex_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pathlib.py_fnmatch_ex_", "embedding": null, "metadata": {"file_path": "src/_pytest/pathlib.py", "file_name": "pathlib.py", "file_type": "text/x-python", "category": "implementation", "start_line": 284, "end_line": 320, "span_ids": ["fnmatch_ex", "parts"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def fnmatch_ex(pattern, path):\n \"\"\"FNMatcher port from py.path.common which works with PurePath() instances.\n\n The difference between this algorithm and PurePath.match() is that the latter matches \"**\" glob expressions\n for each part of the path, while this algorithm uses the whole path instead.\n\n For example:\n \"tests/foo/bar/doc/test_foo.py\" matches pattern \"tests/**/doc/test*.py\" with this algorithm, but not with\n PurePath.match().\n\n This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according\n this logic.\n\n References:\n * https://bugs.python.org/issue29249\n * https://bugs.python.org/issue34731\n \"\"\"\n path = PurePath(path)\n iswin32 = 
sys.platform.startswith(\"win\")\n\n if iswin32 and sep not in pattern and posix_sep in pattern:\n # Running on Windows, the pattern has no Windows path separators,\n # and the pattern has one or more Posix path separators. Replace\n # the Posix path separators with the Windows path separator.\n pattern = pattern.replace(posix_sep, sep)\n\n if sep not in pattern:\n name = path.name\n else:\n name = six.text_type(path)\n return fnmatch.fnmatch(name, pattern)\n\n\ndef parts(s):\n parts = s.split(sep)\n return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__disabled_by_default__IGNORE_PAM._filenames_added_when": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__disabled_by_default__IGNORE_PAM._filenames_added_when", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 37, "span_ids": ["imports", "docstring", "impl"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"(disabled by default) support for testing pytest and pytest plugins.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport codecs\nimport gc\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport time\nimport traceback\nfrom fnmatch import fnmatch\nfrom weakref import WeakKeyDictionary\n\nimport py\nimport six\n\nimport pytest\nfrom _pytest._code import Source\nfrom _pytest._io.saferepr import saferepr\nfrom _pytest.assertion.rewrite import AssertionRewritingHook\nfrom _pytest.capture import MultiCapture\nfrom _pytest.capture import SysCapture\nfrom _pytest.compat import safe_str\nfrom _pytest.compat import Sequence\nfrom _pytest.main import EXIT_INTERRUPTED\nfrom _pytest.main import EXIT_OK\nfrom _pytest.main import Session\nfrom _pytest.monkeypatch import MonkeyPatch\nfrom _pytest.pathlib import Path\n\nIGNORE_PAM = [ # filenames added when obtaining details about the current user\n u\"/var/lib/sss/mc/passwd\"\n]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_addoption_pytest_addoption.parser_addini_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_addoption_pytest_addoption.parser_addini_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 40, "end_line": 62, "span_ids": ["pytest_addoption"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n parser.addoption(\n \"--lsof\",\n action=\"store_true\",\n dest=\"lsof\",\n default=False,\n help=\"run FD checks if lsof is available\",\n )\n\n parser.addoption(\n \"--runpytest\",\n default=\"inprocess\",\n dest=\"runpytest\",\n choices=(\"inprocess\", \"subprocess\"),\n help=(\n \"run pytest sub runs in tests using an 'inprocess' \"\n \"or 'subprocess' (python -m main) method\"\n ),\n )\n\n parser.addini(\n \"pytester_example_dir\", help=\"directory to take the pytester example files from\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_configure_raise_on_kwargs.if_kwargs_pragma_no_.raise_TypeError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_pytest_configure_raise_on_kwargs.if_kwargs_pragma_no_.raise_TypeError_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 65, "end_line": 83, "span_ids": ["raise_on_kwargs", "pytest_configure"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n if config.getvalue(\"lsof\"):\n checker = LsofFdLeakChecker()\n if checker.matching_platform():\n config.pluginmanager.register(checker)\n\n config.addinivalue_line(\n \"markers\",\n \"pytester_example_path(*path_segments): join the given path \"\n \"segments to `pytester_example_dir` for this test.\",\n )\n\n\ndef raise_on_kwargs(kwargs):\n __tracebackhide__ = True\n if kwargs: # pragma: no branch\n raise TypeError(\n \"Unexpected keyword arguments: {}\".format(\", \".join(sorted(kwargs)))\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker_LsofFdLeakChecker._exec_lsof.with_open_os_devnull_wb.return.subprocess_check_output_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker_LsofFdLeakChecker._exec_lsof.with_open_os_devnull_wb.return.subprocess_check_output_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 98, "span_ids": ["LsofFdLeakChecker._exec_lsof", "LsofFdLeakChecker", "LsofFdLeakChecker.get_open_files"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LsofFdLeakChecker(object):\n def get_open_files(self):\n out = self._exec_lsof()\n open_files = 
self._parse_lsof_output(out)\n return open_files\n\n def _exec_lsof(self):\n pid = os.getpid()\n # py3: use subprocess.DEVNULL directly.\n with open(os.devnull, \"wb\") as devnull:\n return subprocess.check_output(\n (\"lsof\", \"-Ffn0\", \"-p\", str(pid)), stderr=devnull\n ).decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker._parse_lsof_output_LsofFdLeakChecker.matching_platform.try_.else_.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker._parse_lsof_output_LsofFdLeakChecker.matching_platform.try_.else_.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 100, "end_line": 129, "span_ids": ["LsofFdLeakChecker.matching_platform", "LsofFdLeakChecker._parse_lsof_output"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LsofFdLeakChecker(object):\n\n def _parse_lsof_output(self, out):\n def isopen(line):\n return line.startswith(\"f\") and (\n \"deleted\" not in line\n and \"mem\" not in line\n and \"txt\" not in line\n and \"cwd\" not in line\n )\n\n open_files = []\n\n for line in out.split(\"\\n\"):\n if isopen(line):\n fields = line.split(\"\\0\")\n fd = fields[0][1:]\n filename = fields[1][1:]\n if filename in IGNORE_PAM:\n continue\n if filename.startswith(\"/\"):\n open_files.append((fd, filename))\n\n return open_files\n\n def matching_platform(self):\n try:\n subprocess.check_output((\"lsof\", \"-v\"))\n except (OSError, subprocess.CalledProcessError):\n return False\n else:\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker.pytest_runtest_protocol_LsofFdLeakChecker.pytest_runtest_protocol.if_leaked_files_.item_warn_pytest_PytestWa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LsofFdLeakChecker.pytest_runtest_protocol_LsofFdLeakChecker.pytest_runtest_protocol.if_leaked_files_.item_warn_pytest_PytestWa", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 131, "end_line": 152, "span_ids": ["LsofFdLeakChecker.pytest_runtest_protocol"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LsofFdLeakChecker(object):\n\n @pytest.hookimpl(hookwrapper=True, tryfirst=True)\n def pytest_runtest_protocol(self, item):\n lines1 = self.get_open_files()\n yield\n if hasattr(sys, 
\"pypy_version_info\"):\n gc.collect()\n lines2 = self.get_open_files()\n\n new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}\n leaked_files = [t for t in lines2 if t[0] in new_fds]\n if leaked_files:\n error = []\n error.append(\"***** %s FD leakage detected\" % len(leaked_files))\n error.extend([str(f) for f in leaked_files])\n error.append(\"*** Before:\")\n error.extend([str(f) for f in lines1])\n error.append(\"*** After:\")\n error.extend([str(f) for f in lines2])\n error.append(error[0])\n error.append(\"*** function %s:%s: %s \" % item.location)\n error.append(\"See issue #2366\")\n item.warn(pytest.PytestWarning(\"\\n\".join(error)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__XXX_copied_from_execnet_ParsedCall.__repr__.return._ParsedCall_r_r_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py__XXX_copied_from_execnet_ParsedCall.__repr__.return._ParsedCall_r_r_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 155, "end_line": 200, "span_ids": ["ParsedCall", "ParsedCall.__repr__", "get_public_names", "PytestArg", "_pytest", "PytestArg.gethookrecorder", "LsofFdLeakChecker.pytest_runtest_protocol", "impl:3"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# XXX copied from execnet's conftest.py - needs to be merged\nwinpymap = {\n \"python2.7\": r\"C:\\Python27\\python.exe\",\n \"python3.4\": r\"C:\\Python34\\python.exe\",\n \"python3.5\": r\"C:\\Python35\\python.exe\",\n \"python3.6\": r\"C:\\Python36\\python.exe\",\n}\n\n\n# used at least by pytest-xdist plugin\n\n\n@pytest.fixture\ndef _pytest(request):\n \"\"\"Return a helper which offers a gethookrecorder(hook) method which\n returns a HookRecorder instance which helps to make assertions about called\n hooks.\n\n \"\"\"\n return PytestArg(request)\n\n\nclass PytestArg(object):\n def __init__(self, request):\n self.request = request\n\n def gethookrecorder(self, hook):\n hookrecorder = HookRecorder(hook._pm)\n self.request.addfinalizer(hookrecorder.finish_recording)\n return hookrecorder\n\n\ndef get_public_names(values):\n \"\"\"Only return names from iterator values without a leading underscore.\"\"\"\n return [x for x in values if x[0] != \"_\"]\n\n\nclass ParsedCall(object):\n def __init__(self, name, kwargs):\n self.__dict__.update(kwargs)\n self._name = name\n\n def __repr__(self):\n d = self.__dict__.copy()\n del d[\"_name\"]\n return \"\" % (self._name, d)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder_HookRecorder.getcalls.return._call_for_call_in_self_ca": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder_HookRecorder.getcalls.return._call_for_call_in_self_ca", "embedding": null, "metadata": 
{"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 203, "end_line": 229, "span_ids": ["HookRecorder", "HookRecorder.finish_recording", "HookRecorder.getcalls", "HookRecorder.__init__.before"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HookRecorder(object):\n \"\"\"Record all hooks called in a plugin manager.\n\n This wraps all the hook calls in the plugin manager, recording each call\n before propagating the normal calls.\n\n \"\"\"\n\n def __init__(self, pluginmanager):\n self._pluginmanager = pluginmanager\n self.calls = []\n\n def before(hook_name, hook_impls, kwargs):\n self.calls.append(ParsedCall(hook_name, kwargs))\n\n def after(outcome, hook_name, hook_impls, kwargs):\n pass\n\n self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)\n\n def finish_recording(self):\n self._undo_wrapping()\n\n def getcalls(self, names):\n if isinstance(names, str):\n names = names.split()\n return [call for call in self.calls if call._name in names]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.assert_contains_HookRecorder.assert_contains.while_entries_.for_ind_call_in_enumerat.else_.pytest_fail_could_not_fi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.assert_contains_HookRecorder.assert_contains.while_entries_.for_ind_call_in_enumerat.else_.pytest_fail_could_not_fi", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 231, "end_line": 250, "span_ids": ["HookRecorder.assert_contains"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HookRecorder(object):\n\n def assert_contains(self, entries):\n __tracebackhide__ = True\n i = 0\n entries = list(entries)\n backlocals = sys._getframe(1).f_locals\n while entries:\n name, check = entries.pop(0)\n for ind, call in enumerate(self.calls[i:]):\n if call._name == name:\n print(\"NAMEMATCH\", name, call)\n if eval(check, backlocals, call.__dict__):\n print(\"CHECKERMATCH\", repr(check), \"->\", call)\n else:\n print(\"NOCHECKERMATCH\", repr(check), \"-\", call)\n continue\n i += ind + 1\n break\n print(\"NONAMEMATCH\", name, \"with\", call)\n else:\n pytest.fail(\"could not find %r check %r\" % (name, check))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.popcall_HookRecorder.getreports.return._x_report_for_x_in_self_g": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.popcall_HookRecorder.getreports.return._x_report_for_x_in_self_g", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 252, "end_line": 270, "span_ids": ["HookRecorder.getreports", "HookRecorder.popcall", "HookRecorder.getcall"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HookRecorder(object):\n\n def popcall(self, name):\n __tracebackhide__ = True\n for i, call in enumerate(self.calls):\n if call._name == name:\n del self.calls[i]\n return call\n lines = [\"could not find call %r, in:\" % (name,)]\n lines.extend([\" %s\" % x for x in self.calls])\n pytest.fail(\"\\n\".join(lines))\n\n def getcall(self, name):\n values = self.getcalls(name)\n assert len(values) == 1, (name, values)\n return values[0]\n\n # functionality for test reports\n\n def getreports(self, names=\"pytest_runtest_logreport pytest_collectreport\"):\n return [x.report for x in self.getcalls(names)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.matchreport_HookRecorder.matchreport.return.values_0_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.matchreport_HookRecorder.matchreport.return.values_0_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 272, "end_line": 297, "span_ids": ["HookRecorder.matchreport"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HookRecorder(object):\n\n def matchreport(\n self,\n inamepart=\"\",\n names=\"pytest_runtest_logreport pytest_collectreport\",\n when=None,\n ):\n \"\"\"return a testreport whose dotted import path matches\"\"\"\n values = []\n for rep in self.getreports(names=names):\n if not when and rep.when != \"call\" and rep.passed:\n # setup/teardown passing reports - let's ignore those\n continue\n if when and rep.when != when:\n continue\n if not inamepart or inamepart in rep.nodeid.split(\"::\"):\n values.append(rep)\n if not values:\n raise ValueError(\n \"could not find test report matching %r: \"\n \"no test reports at all!\" % (inamepart,)\n )\n if len(values) > 1:\n raise ValueError(\n \"found 2 or more testreports matching %r: %s\" % (inamepart, values)\n )\n return values[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.getfailures_HookRecorder.clear.self_calls_": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_HookRecorder.getfailures_HookRecorder.clear.self_calls_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 330, "span_ids": ["HookRecorder.getfailures", "HookRecorder.assertoutcome", "HookRecorder.clear", "HookRecorder.getfailedcollections", "HookRecorder.listoutcomes", "HookRecorder.countoutcomes"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class HookRecorder(object):\n\n def getfailures(self, names=\"pytest_runtest_logreport pytest_collectreport\"):\n return [rep for rep in self.getreports(names) if rep.failed]\n\n def getfailedcollections(self):\n return self.getfailures(\"pytest_collectreport\")\n\n def listoutcomes(self):\n passed = []\n skipped = []\n failed = []\n for rep in self.getreports(\"pytest_collectreport pytest_runtest_logreport\"):\n if rep.passed:\n if rep.when == \"call\":\n passed.append(rep)\n elif rep.skipped:\n skipped.append(rep)\n else:\n assert rep.failed, \"Unexpected outcome: {!r}\".format(rep)\n failed.append(rep)\n return passed, skipped, failed\n\n def countoutcomes(self):\n return [len(x) for x in self.listoutcomes()]\n\n def assertoutcome(self, passed=0, skipped=0, failed=0):\n realpassed, realskipped, realfailed = self.listoutcomes()\n assert passed == len(realpassed)\n assert skipped == len(realskipped)\n assert failed == len(realfailed)\n\n def clear(self):\n self.calls[:] = []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_linecomp_rex_outcome.re_compile_r_d_w_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_linecomp_rex_outcome.re_compile_r_d_w_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 333, "end_line": 366, "span_ids": ["LineMatcher_fixture", "_config_for_test", "_sys_snapshot", "linecomp", "testdir", "impl:5"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef linecomp(request):\n return LineComp()\n\n\n@pytest.fixture(name=\"LineMatcher\")\ndef LineMatcher_fixture(request):\n return LineMatcher\n\n\n@pytest.fixture\ndef testdir(request, tmpdir_factory):\n return Testdir(request, tmpdir_factory)\n\n\n@pytest.fixture\ndef _sys_snapshot():\n snappaths = SysPathsSnapshot()\n snapmods = SysModulesSnapshot()\n yield\n snapmods.restore()\n snappaths.restore()\n\n\n@pytest.fixture\ndef _config_for_test():\n from _pytest.config import get_config\n\n config = get_config()\n yield config\n config._ensure_unconfigure() # cleanup, e.g. 
capman closing tmpfiles.\n\n\nrex_outcome = re.compile(r\"(\\d+) ([\\w-]+)\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult_RunResult.parseoutcomes.raise_ValueError_Pytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult_RunResult.parseoutcomes.raise_ValueError_Pytest_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 369, "end_line": 412, "span_ids": ["RunResult.parseoutcomes", "RunResult.__repr__", "RunResult"], "tokens": 327}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RunResult(object):\n \"\"\"The result of running a command.\n\n Attributes:\n\n :ret: the return value\n :outlines: list of lines captured from stdout\n :errlines: list of lines captures from stderr\n :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to\n reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``\n method\n :stderr: :py:class:`LineMatcher` of stderr\n :duration: duration in seconds\n\n \"\"\"\n\n def __init__(self, ret, outlines, errlines, duration):\n self.ret = ret\n self.outlines = outlines\n self.errlines = errlines\n self.stdout = LineMatcher(outlines)\n self.stderr = LineMatcher(errlines)\n self.duration = duration\n\n def __repr__(self):\n return (\n \"\"\n % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)\n )\n\n def parseoutcomes(self):\n \"\"\"Return a dictionary of outcomestring->num from parsing the terminal\n output that the test process produced.\n\n \"\"\"\n for line in reversed(self.outlines):\n if \"seconds\" in line:\n outcomes = rex_outcome.findall(line)\n if outcomes:\n d = {}\n for num, cat in outcomes:\n d[cat] = int(num)\n return d\n raise ValueError(\"Pytest terminal report not found\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult.assert_outcomes_RunResult.assert_outcomes.assert_obtained_expect": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_RunResult.assert_outcomes_RunResult.assert_outcomes.assert_obtained_expect", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 414, "end_line": 438, "span_ids": ["RunResult.assert_outcomes"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class RunResult(object):\n\n def assert_outcomes(\n self, passed=0, skipped=0, failed=0, error=0, xpassed=0, xfailed=0\n ):\n 
\"\"\"Assert that the specified outcomes appear with the respective\n numbers (0 means it didn't occur) in the text output from a test run.\n\n \"\"\"\n d = self.parseoutcomes()\n obtained = {\n \"passed\": d.get(\"passed\", 0),\n \"skipped\": d.get(\"skipped\", 0),\n \"failed\": d.get(\"failed\", 0),\n \"error\": d.get(\"error\", 0),\n \"xpassed\": d.get(\"xpassed\", 0),\n \"xfailed\": d.get(\"xfailed\", 0),\n }\n expected = {\n \"passed\": passed,\n \"skipped\": skipped,\n \"failed\": failed,\n \"error\": error,\n \"xpassed\": xpassed,\n \"xfailed\": xfailed,\n }\n assert obtained == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_CwdSnapshot_SysPathsSnapshot.restore.sys_path_sys_meta_pat": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_CwdSnapshot_SysPathsSnapshot.restore.sys_path_sys_meta_pat", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 468, "span_ids": ["CwdSnapshot.restore", "SysPathsSnapshot", "SysPathsSnapshot.restore", "SysModulesSnapshot", "SysModulesSnapshot.restore", "CwdSnapshot"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CwdSnapshot(object):\n def __init__(self):\n self.__saved = os.getcwd()\n\n def restore(self):\n os.chdir(self.__saved)\n\n\nclass SysModulesSnapshot(object):\n def __init__(self, preserve=None):\n self.__preserve = preserve\n self.__saved = dict(sys.modules)\n\n def restore(self):\n if self.__preserve:\n self.__saved.update(\n (k, m) for k, m in sys.modules.items() if self.__preserve(k)\n )\n sys.modules.clear()\n sys.modules.update(self.__saved)\n\n\nclass SysPathsSnapshot(object):\n def __init__(self):\n self.__saved = list(sys.path), list(sys.meta_path)\n\n def restore(self):\n sys.path[:], sys.meta_path[:] = self.__saved", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir_Testdir.chdir.self_tmpdir_chdir_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir_Testdir.chdir.self_tmpdir_chdir_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 471, "end_line": 559, "span_ids": ["Testdir.__str__", "Testdir.TimeoutExpired", "Testdir.__init__", "Testdir", "Testdir.__repr__", "Testdir.chdir", "Testdir.__take_sys_modules_snapshot", "Testdir.finalize", "Testdir.__init__:2", "Testdir.make_hook_recorder"], "tokens": 733}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n \"\"\"Temporary test directory with tools to test/run pytest itself.\n\n This is based on the ``tmpdir`` fixture but provides a number of methods\n which aid with testing pytest itself. Unless :py:meth:`chdir` is used all\n methods will use :py:attr:`tmpdir` as their current working directory.\n\n Attributes:\n\n :tmpdir: The :py:class:`py.path.local` instance of the temporary directory.\n\n :plugins: A list of plugins to use with :py:meth:`parseconfig` and\n :py:meth:`runpytest`. Initially this is an empty list but plugins can\n be added to the list. The type of items to add to the list depends on\n the method using them so refer to them for details.\n\n \"\"\"\n\n CLOSE_STDIN = object\n\n class TimeoutExpired(Exception):\n pass\n\n def __init__(self, request, tmpdir_factory):\n self.request = request\n self._mod_collections = WeakKeyDictionary()\n name = request.function.__name__\n self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)\n self.test_tmproot = tmpdir_factory.mktemp(\"tmp-\" + name, numbered=True)\n self.plugins = []\n self._cwd_snapshot = CwdSnapshot()\n self._sys_path_snapshot = SysPathsSnapshot()\n self._sys_modules_snapshot = self.__take_sys_modules_snapshot()\n self.chdir()\n self.request.addfinalizer(self.finalize)\n method = self.request.config.getoption(\"--runpytest\")\n if method == \"inprocess\":\n self._runpytest_method = self.runpytest_inprocess\n elif method == \"subprocess\":\n self._runpytest_method = self.runpytest_subprocess\n\n mp = self.monkeypatch = MonkeyPatch()\n mp.setenv(\"PYTEST_DEBUG_TEMPROOT\", str(self.test_tmproot))\n # Ensure no unexpected caching via tox.\n mp.delenv(\"TOX_ENV_DIR\", raising=False)\n # Discard outer pytest options.\n mp.delenv(\"PYTEST_ADDOPTS\", raising=False)\n\n def __repr__(self):\n return \"\" % (self.tmpdir,)\n\n def __str__(self):\n return str(self.tmpdir)\n\n def finalize(self):\n \"\"\"Clean up global state artifacts.\n\n Some methods modify the global interpreter state and this tries to\n clean this up. 
It does not remove the temporary directory however so\n it can be looked at after the test run has finished.\n\n \"\"\"\n self._sys_modules_snapshot.restore()\n self._sys_path_snapshot.restore()\n self._cwd_snapshot.restore()\n self.monkeypatch.undo()\n\n def __take_sys_modules_snapshot(self):\n # some zope modules used by twisted-related tests keep internal state\n # and can't be deleted; we had some trouble in the past with\n # `zope.interface` for example\n def preserve_module(name):\n return name.startswith(\"zope\")\n\n return SysModulesSnapshot(preserve=preserve_module)\n\n def make_hook_recorder(self, pluginmanager):\n \"\"\"Create a new :py:class:`HookRecorder` for a PluginManager.\"\"\"\n pluginmanager.reprec = reprec = HookRecorder(pluginmanager)\n self.request.addfinalizer(reprec.finish_recording)\n return reprec\n\n def chdir(self):\n \"\"\"Cd into the temporary directory.\n\n This is done automatically upon instantiation.\n\n \"\"\"\n self.tmpdir.chdir()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._makefile_Testdir._makefile.return.ret": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._makefile_Testdir._makefile.return.ret", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 561, "end_line": 581, "span_ids": ["Testdir._makefile"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def _makefile(self, ext, args, kwargs, encoding=\"utf-8\"):\n items = list(kwargs.items())\n\n def to_text(s):\n return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)\n\n if args:\n source = u\"\\n\".join(to_text(x) for x in args)\n basename = self.request.function.__name__\n items.insert(0, (basename, source))\n\n ret = None\n for basename, value in items:\n p = self.tmpdir.join(basename).new(ext=ext)\n p.dirpath().ensure_dir()\n source = Source(value)\n source = u\"\\n\".join(to_text(line) for line in source.lines)\n p.write(source.strip().encode(encoding), \"wb\")\n if ret is None:\n ret = p\n return ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makefile_Testdir.makefile.return.self__makefile_ext_args_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makefile_Testdir.makefile.return.self__makefile_ext_args_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 583, "end_line": 602, "span_ids": ["Testdir.makefile"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def makefile(self, ext, *args, **kwargs):\n r\"\"\"Create new file(s) in the testdir.\n\n :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`.\n :param list[str] args: All args will be treated as strings and joined using newlines.\n The result will be written as contents to the file. The name of the\n file will be based on the test function requesting this fixture.\n :param kwargs: Each keyword is the name of a file, while the value of it will\n be written as contents of the file.\n\n Examples:\n\n .. code-block:: python\n\n testdir.makefile(\".txt\", \"line1\", \"line2\")\n\n testdir.makefile(\".ini\", pytest=\"[pytest]\\naddopts=-rs\\n\")\n\n \"\"\"\n return self._makefile(ext, args, kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makeconftest_Testdir.mkpydir.return.p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.makeconftest_Testdir.mkpydir.return.p", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 604, "end_line": 649, "span_ids": ["Testdir.mkdir", "Testdir.syspathinsert", "Testdir.maketxtfile", "Testdir.makeini", "Testdir.mkpydir", "Testdir.makepyfile", "Testdir.makeconftest", "Testdir.getinicfg"], "tokens": 364}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def makeconftest(self, source):\n \"\"\"Write a contest.py file with 'source' as contents.\"\"\"\n return self.makepyfile(conftest=source)\n\n def makeini(self, source):\n \"\"\"Write a tox.ini file with 'source' as contents.\"\"\"\n return self.makefile(\".ini\", tox=source)\n\n def getinicfg(self, source):\n \"\"\"Return the pytest section from the tox.ini config file.\"\"\"\n p = self.makeini(source)\n return py.iniconfig.IniConfig(p)[\"pytest\"]\n\n def makepyfile(self, *args, **kwargs):\n \"\"\"Shortcut for .makefile() with a .py extension.\"\"\"\n return self._makefile(\".py\", args, kwargs)\n\n def maketxtfile(self, *args, **kwargs):\n \"\"\"Shortcut for .makefile() with a .txt extension.\"\"\"\n return self._makefile(\".txt\", args, kwargs)\n\n def syspathinsert(self, path=None):\n \"\"\"Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.\n\n This is undone automatically when this object dies at the end of each\n test.\n \"\"\"\n if path is None:\n path = self.tmpdir\n\n self.monkeypatch.syspath_prepend(str(path))\n\n def mkdir(self, name):\n \"\"\"Create a new (sub)directory.\"\"\"\n return self.tmpdir.mkdir(name)\n\n def mkpydir(self, name):\n \"\"\"Create a new python package.\n\n This creates a (sub)directory with an empty ``__init__.py`` file so it\n gets recognised as a python package.\n\n \"\"\"\n p = self.mkdir(name)\n p.ensure(\"__init__.py\")\n return p", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.copy_example_Testdir.copy_example.if_example_path_isdir_a.else_.raise_LookupError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.copy_example_Testdir.copy_example.if_example_path_isdir_a.else_.raise_LookupError_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 651, "end_line": 693, "span_ids": ["Testdir.copy_example"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def copy_example(self, name=None):\n import warnings\n from _pytest.warning_types import PYTESTER_COPY_EXAMPLE\n\n warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2)\n example_dir = self.request.config.getini(\"pytester_example_dir\")\n if example_dir is None:\n raise ValueError(\"pytester_example_dir is unset, can't copy examples\")\n example_dir = self.request.config.rootdir.join(example_dir)\n\n for extra_element in self.request.node.iter_markers(\"pytester_example_path\"):\n assert extra_element.args\n example_dir = example_dir.join(*extra_element.args)\n\n if name is None:\n func_name = self.request.function.__name__\n maybe_dir = example_dir / func_name\n maybe_file = example_dir / (func_name + \".py\")\n\n if maybe_dir.isdir():\n example_path = maybe_dir\n elif maybe_file.isfile():\n example_path = maybe_file\n else:\n raise LookupError(\n \"{} cant be found as module or package in {}\".format(\n func_name, example_dir.bestrelpath(self.request.config.rootdir)\n )\n )\n else:\n example_path = example_dir.join(name)\n\n if example_path.isdir() and not example_path.join(\"__init__.py\").isfile():\n example_path.copy(self.tmpdir)\n return self.tmpdir\n elif example_path.isfile():\n result = self.tmpdir.join(example_path.basename)\n example_path.copy(result)\n return result\n else:\n raise LookupError(\n 'example \"{}\" is not found as a file or directory'.format(example_path)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.Session_Testdir.getnode.return.res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.Session_Testdir.getnode.return.res", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 695, "end_line": 713, "span_ids": ["Testdir:5", "Testdir.getnode"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n 
Session = Session\n\n def getnode(self, config, arg):\n \"\"\"Return the collection node of a file.\n\n :param config: :py:class:`_pytest.config.Config` instance, see\n :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the\n configuration\n\n :param arg: a :py:class:`py.path.local` instance of the file\n\n \"\"\"\n session = Session(config)\n assert \"::\" not in str(arg)\n p = py.path.local(arg)\n config.hook.pytest_sessionstart(session=session)\n res = session.perform_collect([str(p)], genitems=False)[0]\n config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getpathnode_Testdir.getpathnode.return.res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getpathnode_Testdir.getpathnode.return.res", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 715, "end_line": 730, "span_ids": ["Testdir.getpathnode"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def getpathnode(self, path):\n \"\"\"Return the collection node of a file.\n\n This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to\n create the (configured) pytest Config instance.\n\n :param path: a :py:class:`py.path.local` instance of the file\n\n \"\"\"\n config = self.parseconfigure(path)\n session = Session(config)\n x = session.fspath.bestrelpath(path)\n config.hook.pytest_sessionstart(session=session)\n res = session.perform_collect([x], genitems=False)[0]\n config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.genitems_Testdir.runitem.return.runner_item_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.genitems_Testdir.runitem.return.runner_item_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 732, "end_line": 759, "span_ids": ["Testdir.genitems", "Testdir.runitem"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def genitems(self, colitems):\n \"\"\"Generate all test items from a collection node.\n\n This recurses into the collection node and returns a list of all the\n test items contained within.\n\n \"\"\"\n session = colitems[0].session\n result = []\n for 
colitem in colitems:\n result.extend(session.genitems(colitem))\n return result\n\n def runitem(self, source):\n \"\"\"Run the \"test_func\" Item.\n\n The calling test instance (class containing the test method) must\n provide a ``.getrunner()`` method which should return a runner which\n can run the test protocol for a single item, e.g.\n :py:func:`_pytest.runner.runtestprotocol`.\n\n \"\"\"\n # used from runner functional tests\n item = self.getitem(source)\n # the test class where we are called from wants to provide the runner\n testclassinstance = self.request.instance\n runner = testclassinstance.getrunner()\n return runner(item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_runsource_Testdir.inline_runsource.return.self_inline_run_values_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_runsource_Testdir.inline_runsource.return.self_inline_run_values_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 761, "end_line": 777, "span_ids": ["Testdir.inline_runsource"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def inline_runsource(self, source, *cmdlineargs):\n \"\"\"Run a test module in process using ``pytest.main()``.\n\n This run writes \"source\" into a temporary file and runs\n ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance\n for the result.\n\n :param source: the source code of the test module\n\n :param cmdlineargs: any extra command line arguments to use\n\n :return: :py:class:`HookRecorder` instance of the result\n\n \"\"\"\n p = self.makepyfile(source)\n values = list(cmdlineargs) + [p]\n return self.inline_run(*values)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_genitems_Testdir.inline_genitems.return.items_rec": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_genitems_Testdir.inline_genitems.return.items_rec", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 779, "end_line": 789, "span_ids": ["Testdir.inline_genitems"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def inline_genitems(self, *args):\n \"\"\"Run ``pytest.main(['--collectonly'])`` in-process.\n\n Runs the :py:func:`pytest.main` function to run all of pytest 
inside\n the test process itself like :py:meth:`inline_run`, but returns a\n tuple of the collected items and a :py:class:`HookRecorder` instance.\n\n \"\"\"\n rec = self.inline_run(\"--collect-only\", *args)\n items = [x.item for x in rec.getcalls(\"pytest_itemcollected\")]\n return items, rec", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_run_Testdir.inline_run.try_.finally_.for_finalizer_in_finalize.finalizer_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.inline_run_Testdir.inline_run.try_.finally_.for_finalizer_in_finalize.finalizer_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 791, "end_line": 870, "span_ids": ["Testdir.inline_run.try_.if_len_rec_1_.else_.reprec", "Testdir.inline_run.try_.if_len_rec_1_.else_.reprec:2", "Testdir.inline_run", "Testdir.inline_run.try_.Collect.pytest_configure", "Testdir.inline_run.try_.Collect"], "tokens": 688}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def inline_run(self, *args, **kwargs):\n \"\"\"Run ``pytest.main()`` in-process, returning a HookRecorder.\n\n Runs the :py:func:`pytest.main` function to run all of pytest inside\n the test process itself. This means it can return a\n :py:class:`HookRecorder` instance which gives more detailed results\n from that run than can be done by matching stdout/stderr from\n :py:meth:`runpytest`.\n\n :param args: command line arguments to pass to :py:func:`pytest.main`\n\n :param plugins: (keyword-only) extra plugin instances the\n ``pytest.main()`` instance should use\n\n :return: a :py:class:`HookRecorder` instance\n \"\"\"\n plugins = kwargs.pop(\"plugins\", [])\n no_reraise_ctrlc = kwargs.pop(\"no_reraise_ctrlc\", None)\n raise_on_kwargs(kwargs)\n\n finalizers = []\n try:\n # Do not load user config (during runs only).\n mp_run = MonkeyPatch()\n mp_run.setenv(\"HOME\", str(self.tmpdir))\n mp_run.setenv(\"USERPROFILE\", str(self.tmpdir))\n finalizers.append(mp_run.undo)\n\n # When running pytest inline any plugins active in the main test\n # process are already imported. So this disables the warning which\n # will trigger to say they can no longer be rewritten, which is\n # fine as they have already been rewritten.\n orig_warn = AssertionRewritingHook._warn_already_imported\n\n def revert_warn_already_imported():\n AssertionRewritingHook._warn_already_imported = orig_warn\n\n finalizers.append(revert_warn_already_imported)\n AssertionRewritingHook._warn_already_imported = lambda *a: None\n\n # Any sys.module or sys.path changes done while running pytest\n # inline should be reverted after the test run completes to avoid\n # clashing with later inline tests run within the same pytest test,\n # e.g. 
just because they use matching test module names.\n finalizers.append(self.__take_sys_modules_snapshot().restore)\n finalizers.append(SysPathsSnapshot().restore)\n\n # Important note:\n # - our tests should not leave any other references/registrations\n # laying around other than possibly loaded test modules\n # referenced from sys.modules, as nothing will clean those up\n # automatically\n\n rec = []\n\n class Collect(object):\n def pytest_configure(x, config):\n rec.append(self.make_hook_recorder(config.pluginmanager))\n\n plugins.append(Collect())\n ret = pytest.main(list(args), plugins=plugins)\n if len(rec) == 1:\n reprec = rec.pop()\n else:\n\n class reprec(object):\n pass\n\n reprec.ret = ret\n\n # typically we reraise keyboard interrupts from the child run\n # because it's our user requesting interruption of the testing\n if ret == EXIT_INTERRUPTED and not no_reraise_ctrlc:\n calls = reprec.getcalls(\"pytest_keyboard_interrupt\")\n if calls and calls[-1].excinfo.type == KeyboardInterrupt:\n raise KeyboardInterrupt()\n return reprec\n finally:\n for finalizer in finalizers:\n finalizer()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_inprocess_Testdir.runpytest_inprocess.return.res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_inprocess_Testdir.runpytest_inprocess.return.res", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 872, "end_line": 905, "span_ids": ["Testdir.runpytest_inprocess.try_.try_.except_SystemExit_as_e_.reprec", "Testdir.runpytest_inprocess.try_.try_.except_Exception_.reprec", "Testdir.runpytest_inprocess.try_.try_.except_Exception_.reprec:2", "Testdir.runpytest_inprocess", "Testdir.runpytest_inprocess.try_.try_.except_SystemExit_as_e_.reprec:2"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def runpytest_inprocess(self, *args, **kwargs):\n \"\"\"Return result of running pytest in-process, providing a similar\n interface to what self.runpytest() provides.\n \"\"\"\n syspathinsert = kwargs.pop(\"syspathinsert\", False)\n\n if syspathinsert:\n self.syspathinsert()\n now = time.time()\n capture = MultiCapture(Capture=SysCapture)\n capture.start_capturing()\n try:\n try:\n reprec = self.inline_run(*args, **kwargs)\n except SystemExit as e:\n\n class reprec(object):\n ret = e.args[0]\n\n except Exception:\n traceback.print_exc()\n\n class reprec(object):\n ret = 3\n\n finally:\n out, err = capture.readouterr()\n capture.stop_capturing()\n sys.stdout.write(out)\n sys.stderr.write(err)\n\n res = RunResult(reprec.ret, out.split(\"\\n\"), err.split(\"\\n\"), time.time() - now)\n res.reprec = reprec\n return res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_Testdir._ensure_basetemp.return.args": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_Testdir._ensure_basetemp.return.args", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 907, "end_line": 922, "span_ids": ["Testdir._ensure_basetemp", "Testdir.runpytest"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def runpytest(self, *args, **kwargs):\n \"\"\"Run pytest inline or in a subprocess, depending on the command line\n option \"--runpytest\" and return a :py:class:`RunResult`.\n\n \"\"\"\n args = self._ensure_basetemp(args)\n return self._runpytest_method(*args, **kwargs)\n\n def _ensure_basetemp(self, args):\n args = list(args)\n for x in args:\n if safe_str(x).startswith(\"--basetemp\"):\n break\n else:\n args.append(\"--basetemp=%s\" % self.tmpdir.dirpath(\"basetemp\"))\n return args", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfig_Testdir.parseconfig.return.config": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfig_Testdir.parseconfig.return.config", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 924, "end_line": 945, "span_ids": ["Testdir.parseconfig"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def parseconfig(self, *args):\n \"\"\"Return a new pytest Config instance from given commandline args.\n\n This invokes the pytest bootstrapping code in _pytest.config to create\n a new :py:class:`_pytest.core.PluginManager` and call the\n pytest_cmdline_parse hook to create a new\n :py:class:`_pytest.config.Config` instance.\n\n If :py:attr:`plugins` has been populated they should be plugin modules\n to be registered with the PluginManager.\n\n \"\"\"\n args = self._ensure_basetemp(args)\n\n import _pytest.config\n\n config = _pytest.config._prepareconfig(args, self.plugins)\n # we don't know what the test will do with this half-setup config\n # object and thus we make sure it gets unconfigured properly in any\n # case (otherwise capturing could still be active, for example)\n self.request.addfinalizer(config._ensure_unconfigure)\n return config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfigure_Testdir.getitem.assert_0_r_item_not_fo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.parseconfigure_Testdir.getitem.assert_0_r_item_not_fo", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 947, "end_line": 980, "span_ids": ["Testdir.getitem", "Testdir.parseconfigure"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def parseconfigure(self, *args):\n \"\"\"Return a new pytest configured Config instance.\n\n This returns a new :py:class:`_pytest.config.Config` instance like\n :py:meth:`parseconfig`, but also calls the pytest_configure hook.\n\n \"\"\"\n config = self.parseconfig(*args)\n config._do_configure()\n self.request.addfinalizer(config._ensure_unconfigure)\n return config\n\n def getitem(self, source, funcname=\"test_func\"):\n \"\"\"Return the test item for a test function.\n\n This writes the source to a python file and runs pytest's collection on\n the resulting module, returning the test item for the requested\n function name.\n\n :param source: the module source\n\n :param funcname: the name of the test function for which to return a\n test item\n\n \"\"\"\n items = self.getitems(source)\n for item in items:\n if item.name == funcname:\n return item\n assert 0, \"%r item not found in module:\\n%s\\nitems: %s\" % (\n funcname,\n source,\n items,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getitems_Testdir.getmodulecol.return.self_getnode_config_path": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.getitems_Testdir.getmodulecol.return.self_getnode_config_path", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 982, "end_line": 1017, "span_ids": ["Testdir.getitems", "Testdir.getmodulecol"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def getitems(self, source):\n \"\"\"Return all test items collected from the module.\n\n This writes the source to a python file and runs pytest's collection on\n the resulting module, returning all test items contained within.\n\n \"\"\"\n modcol = self.getmodulecol(source)\n return self.genitems([modcol])\n\n def getmodulecol(self, source, configargs=(), withinit=False):\n \"\"\"Return the module collection node for ``source``.\n\n This writes ``source`` to a file using :py:meth:`makepyfile` and then\n runs the pytest collection on it, returning the collection node 
for the\n test module.\n\n :param source: the source code of the module to collect\n\n :param configargs: any extra arguments to pass to\n :py:meth:`parseconfigure`\n\n :param withinit: whether to also write an ``__init__.py`` file to the\n same directory to ensure it is a package\n\n \"\"\"\n if isinstance(source, Path):\n path = self.tmpdir.join(str(source))\n assert not withinit, \"not supported for paths\"\n else:\n kw = {self.request.function.__name__: Source(source).strip()}\n path = self.makepyfile(**kw)\n if withinit:\n self.makepyfile(__init__=\"#\")\n self.config = config = self.parseconfigure(path, *configargs)\n return self.getnode(config, path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.collect_by_name_Testdir.collect_by_name.for_colitem_in_self__mod_.if_colitem_name_name_.return.colitem": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.collect_by_name_Testdir.collect_by_name.for_colitem_in_self__mod_.if_colitem_name_name_.return.colitem", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1019, "end_line": 1034, "span_ids": ["Testdir.collect_by_name"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def collect_by_name(self, modcol, name):\n \"\"\"Return the collection node for name from the module collection.\n\n This will search a module collection node for a collection node\n matching the given name.\n\n :param modcol: a module collection node; see :py:meth:`getmodulecol`\n\n :param name: the name of the node to return\n\n \"\"\"\n if modcol not in self._mod_collections:\n self._mod_collections[modcol] = list(modcol.collect())\n for colitem in self._mod_collections[modcol]:\n if colitem.name == name:\n return colitem", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.popen_Testdir.popen.return.popen": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.popen_Testdir.popen.return.popen", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1036, "end_line": 1074, "span_ids": ["Testdir.popen"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def popen(\n self,\n cmdargs,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=CLOSE_STDIN,\n **kw\n ):\n \"\"\"Invoke subprocess.Popen.\n\n This 
calls subprocess.Popen making sure the current working directory\n is in the PYTHONPATH.\n\n You probably want to use :py:meth:`run` instead.\n\n \"\"\"\n env = os.environ.copy()\n env[\"PYTHONPATH\"] = os.pathsep.join(\n filter(None, [os.getcwd(), env.get(\"PYTHONPATH\", \"\")])\n )\n # Do not load user config.\n env[\"HOME\"] = str(self.tmpdir)\n env[\"USERPROFILE\"] = env[\"HOME\"]\n kw[\"env\"] = env\n\n if stdin is Testdir.CLOSE_STDIN:\n kw[\"stdin\"] = subprocess.PIPE\n elif isinstance(stdin, bytes):\n kw[\"stdin\"] = subprocess.PIPE\n else:\n kw[\"stdin\"] = stdin\n\n popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)\n if stdin is Testdir.CLOSE_STDIN:\n popen.stdin.close()\n elif isinstance(stdin, bytes):\n popen.stdin.write(stdin)\n\n return popen", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.run_Testdir.run.return.RunResult_ret_out_err_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.run_Testdir.run.return.RunResult_ret_out_err_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1076, "end_line": 1165, "span_ids": ["Testdir.run"], "tokens": 649}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def run(self, *cmdargs, **kwargs):\n \"\"\"Run a command with arguments.\n\n Run a process using subprocess.Popen saving the stdout and stderr.\n\n :param args: the sequence of arguments to pass to `subprocess.Popen()`\n :param timeout: the period in seconds after which to timeout and raise\n :py:class:`Testdir.TimeoutExpired`\n :param stdin: optional standard input. 
Bytes are being send, closing\n the pipe, otherwise it is passed through to ``popen``.\n Defaults to ``CLOSE_STDIN``, which translates to using a pipe\n (``subprocess.PIPE``) that gets closed.\n\n Returns a :py:class:`RunResult`.\n\n \"\"\"\n __tracebackhide__ = True\n\n timeout = kwargs.pop(\"timeout\", None)\n stdin = kwargs.pop(\"stdin\", Testdir.CLOSE_STDIN)\n raise_on_kwargs(kwargs)\n\n cmdargs = [\n str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs\n ]\n p1 = self.tmpdir.join(\"stdout\")\n p2 = self.tmpdir.join(\"stderr\")\n print(\"running:\", *cmdargs)\n print(\" in:\", py.path.local())\n f1 = codecs.open(str(p1), \"w\", encoding=\"utf8\")\n f2 = codecs.open(str(p2), \"w\", encoding=\"utf8\")\n try:\n now = time.time()\n popen = self.popen(\n cmdargs,\n stdin=stdin,\n stdout=f1,\n stderr=f2,\n close_fds=(sys.platform != \"win32\"),\n )\n if isinstance(stdin, bytes):\n popen.stdin.close()\n\n def handle_timeout():\n __tracebackhide__ = True\n\n timeout_message = (\n \"{seconds} second timeout expired running:\"\n \" {command}\".format(seconds=timeout, command=cmdargs)\n )\n\n popen.kill()\n popen.wait()\n raise self.TimeoutExpired(timeout_message)\n\n if timeout is None:\n ret = popen.wait()\n elif six.PY3:\n try:\n ret = popen.wait(timeout)\n except subprocess.TimeoutExpired:\n handle_timeout()\n else:\n end = time.time() + timeout\n\n resolution = min(0.1, timeout / 10)\n\n while True:\n ret = popen.poll()\n if ret is not None:\n break\n\n if time.time() > end:\n handle_timeout()\n\n time.sleep(resolution)\n finally:\n f1.close()\n f2.close()\n f1 = codecs.open(str(p1), \"r\", encoding=\"utf8\")\n f2 = codecs.open(str(p2), \"r\", encoding=\"utf8\")\n try:\n out = f1.read().splitlines()\n err = f2.read().splitlines()\n finally:\n f1.close()\n f2.close()\n self._dump_lines(out, sys.stdout)\n self._dump_lines(err, sys.stderr)\n return RunResult(ret, out, err, time.time() - now)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._dump_lines_Testdir.runpython_c.return.self_run_sys_executable_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir._dump_lines_Testdir.runpython_c.return.self_run_sys_executable_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1167, "end_line": 1187, "span_ids": ["Testdir.runpython", "Testdir._getpytestargs", "Testdir._dump_lines", "Testdir.runpython_c"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def _dump_lines(self, lines, fp):\n try:\n for line in lines:\n print(line, file=fp)\n except UnicodeEncodeError:\n print(\"couldn't print to %s because of encoding\" % (fp,))\n\n def _getpytestargs(self):\n return sys.executable, \"-mpytest\"\n\n def runpython(self, script):\n \"\"\"Run a python script using sys.executable as interpreter.\n\n Returns a :py:class:`RunResult`.\n\n \"\"\"\n return self.run(sys.executable, script)\n\n def 
runpython_c(self, command):\n \"\"\"Run python -c \"command\", return a :py:class:`RunResult`.\"\"\"\n return self.run(sys.executable, \"-c\", command)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_subprocess_Testdir.runpytest_subprocess.return.self_run_args_timeout_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.runpytest_subprocess_Testdir.runpytest_subprocess.return.self_run_args_timeout_t", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1189, "end_line": 1216, "span_ids": ["Testdir.runpytest_subprocess"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def runpytest_subprocess(self, *args, **kwargs):\n \"\"\"Run pytest as a subprocess with given arguments.\n\n Any plugins added to the :py:attr:`plugins` list will be added using the\n ``-p`` command line option. Additionally ``--basetemp`` is used to put\n any temporary files and directories in a numbered directory prefixed\n with \"runpytest-\" to not conflict with the normal numbered pytest\n location for temporary files and directories.\n\n :param args: the sequence of arguments to pass to the pytest subprocess\n :param timeout: the period in seconds after which to timeout and raise\n :py:class:`Testdir.TimeoutExpired`\n\n Returns a :py:class:`RunResult`.\n \"\"\"\n __tracebackhide__ = True\n timeout = kwargs.pop(\"timeout\", None)\n raise_on_kwargs(kwargs)\n\n p = py.path.local.make_numbered_dir(\n prefix=\"runpytest-\", keep=None, rootdir=self.tmpdir\n )\n args = (\"--basetemp=%s\" % p,) + args\n plugins = [x for x in self.plugins if isinstance(x, str)]\n if plugins:\n args = (\"-p\", plugins[0]) + args\n args = self._getpytestargs() + args\n return self.run(*args, timeout=timeout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_pytest_Testdir.spawn_pytest.return.self_spawn_cmd_expect_ti": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_pytest_Testdir.spawn_pytest.return.self_spawn_cmd_expect_ti", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1218, "end_line": 1230, "span_ids": ["Testdir.spawn_pytest"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def spawn_pytest(self, string, expect_timeout=10.0):\n \"\"\"Run 
pytest using pexpect.\n\n This makes sure to use the right pytest and sets up the temporary\n directory locations.\n\n The pexpect child is returned.\n\n \"\"\"\n basetemp = self.tmpdir.mkdir(\"temp-pexpect\")\n invoke = \" \".join(map(str, self._getpytestargs()))\n cmd = \"%s --basetemp=%s %s\" % (invoke, basetemp, string)\n return self.spawn(cmd, expect_timeout=expect_timeout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_Testdir.spawn.return.child": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_Testdir.spawn_Testdir.spawn.return.child", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1232, "end_line": 1247, "span_ids": ["Testdir.spawn"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Testdir(object):\n\n def spawn(self, cmd, expect_timeout=10.0):\n \"\"\"Run a command using pexpect.\n\n The pexpect child is returned.\n\n \"\"\"\n pexpect = pytest.importorskip(\"pexpect\", \"3.0\")\n if hasattr(sys, \"pypy_version_info\") and \"64\" in platform.machine():\n pytest.skip(\"pypy-64 bit not supported\")\n if sys.platform.startswith(\"freebsd\"):\n pytest.xfail(\"pexpect does not work reliably on freebsd\")\n logfile = self.tmpdir.join(\"spawn.out\").open(\"wb\")\n child = pexpect.spawn(cmd, logfile=logfile)\n self.request.addfinalizer(logfile.close)\n child.timeout = expect_timeout\n return child", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_getdecoded_LineComp.assert_contains_lines.return.LineMatcher_lines1_fnmat": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_getdecoded_LineComp.assert_contains_lines.return.LineMatcher_lines1_fnmat", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1250, "end_line": 1272, "span_ids": ["LineComp.assert_contains_lines", "getdecoded", "LineComp"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getdecoded(out):\n try:\n return out.decode(\"utf-8\")\n except UnicodeDecodeError:\n return \"INTERNAL not-utf8-decodeable, truncated string:\\n%s\" % (saferepr(out),)\n\n\nclass LineComp(object):\n def __init__(self):\n self.stringio = py.io.TextIO()\n\n def assert_contains_lines(self, lines2):\n \"\"\"Assert that lines2 are contained (linearly) in lines1.\n\n Return a list of extralines found.\n\n \"\"\"\n __tracebackhide__ = True\n 
val = self.stringio.getvalue()\n self.stringio.truncate(0)\n self.stringio.seek(0)\n lines1 = val.split(\"\\n\")\n return LineMatcher(lines1).fnmatch_lines(lines2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher_LineMatcher.re_match_lines_random.self__match_lines_random_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher_LineMatcher.re_match_lines_random.self__match_lines_random_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1275, "end_line": 1317, "span_ids": ["LineMatcher._getlines", "LineMatcher.str", "LineMatcher.fnmatch_lines_random", "LineMatcher.re_match_lines_random", "LineMatcher"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LineMatcher(object):\n \"\"\"Flexible matching of text.\n\n This is a convenience class to test large texts like the output of\n commands.\n\n The constructor takes a list of lines without their trailing newlines, i.e.\n ``text.splitlines()``.\n\n \"\"\"\n\n def __init__(self, lines):\n self.lines = lines\n self._log_output = []\n\n def str(self):\n \"\"\"Return the entire original text.\"\"\"\n return \"\\n\".join(self.lines)\n\n def _getlines(self, lines2):\n if isinstance(lines2, str):\n lines2 = Source(lines2)\n if isinstance(lines2, Source):\n lines2 = lines2.strip().lines\n return lines2\n\n def fnmatch_lines_random(self, lines2):\n \"\"\"Check lines exist in the output using in any order.\n\n Lines are checked using ``fnmatch.fnmatch``. 
The argument is a list of\n lines which have to occur in the output, in any order.\n\n \"\"\"\n self._match_lines_random(lines2, fnmatch)\n\n def re_match_lines_random(self, lines2):\n \"\"\"Check lines exist in the output using ``re.match``, in any order.\n\n The argument is a list of lines which have to occur in the output, in\n any order.\n\n \"\"\"\n self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_random_LineMatcher._match_lines_random.for_line_in_lines2_.for_x_in_self_lines_.else_.raise_ValueError_self__lo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_random_LineMatcher._match_lines_random.for_line_in_lines2_.for_x_in_self_lines_.else_.raise_ValueError_self__lo", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1319, "end_line": 1334, "span_ids": ["LineMatcher._match_lines_random"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LineMatcher(object):\n\n def _match_lines_random(self, lines2, match_func):\n \"\"\"Check lines exist in the output.\n\n The argument is a list of lines which have to occur in the output, in\n any order. 
Each line can contain glob whildcards.\n\n \"\"\"\n lines2 = self._getlines(lines2)\n for line in lines2:\n for x in self.lines:\n if line == x or match_func(x, line):\n self._log(\"matched: \", repr(line))\n break\n else:\n self._log(\"line %r not found in output\" % line)\n raise ValueError(self._log_text)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher.get_lines_after_LineMatcher.re_match_lines.self__match_lines_lines2_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher.get_lines_after_LineMatcher.re_match_lines.self__match_lines_lines2_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1336, "end_line": 1375, "span_ids": ["LineMatcher._log", "LineMatcher.re_match_lines", "LineMatcher.fnmatch_lines", "LineMatcher._log_text", "LineMatcher.get_lines_after"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LineMatcher(object):\n\n def get_lines_after(self, fnline):\n \"\"\"Return all lines following the given line in the text.\n\n The given line can contain glob wildcards.\n\n \"\"\"\n for i, line in enumerate(self.lines):\n if fnline == line or fnmatch(line, fnline):\n return self.lines[i + 1 :]\n raise ValueError(\"line %r not found in output\" % fnline)\n\n def _log(self, *args):\n self._log_output.append(\" \".join(str(x) for x in args))\n\n @property\n def _log_text(self):\n return \"\\n\".join(self._log_output)\n\n def fnmatch_lines(self, lines2):\n \"\"\"Search captured text for matching lines using ``fnmatch.fnmatch``.\n\n The argument is a list of lines which have to match and can use glob\n wildcards. If they do not match a pytest.fail() is called. 
The\n matches and non-matches are also printed on stdout.\n\n \"\"\"\n __tracebackhide__ = True\n self._match_lines(lines2, fnmatch, \"fnmatch\")\n\n def re_match_lines(self, lines2):\n \"\"\"Search captured text for matching lines using ``re.match``.\n\n The argument is a list of lines which have to match using ``re.match``.\n If they do not match a pytest.fail() is called.\n\n The matches and non-matches are also printed on stdout.\n\n \"\"\"\n __tracebackhide__ = True\n self._match_lines(lines2, lambda name, pat: re.match(pat, name), \"re.match\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/pytester.py_LineMatcher._match_lines_", "embedding": null, "metadata": {"file_path": "src/_pytest/pytester.py", "file_name": "pytester.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1377, "end_line": 1415, "span_ids": ["LineMatcher._match_lines"], "tokens": 344}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class LineMatcher(object):\n\n def _match_lines(self, lines2, match_func, match_nickname):\n \"\"\"Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.\n\n :param list[str] lines2: list of string patterns to match. The actual\n format depends on ``match_func``\n :param match_func: a callable ``match_func(line, pattern)`` where line\n is the captured line from stdout/stderr and pattern is the matching\n pattern\n :param str match_nickname: the nickname for the match function that\n will be logged to stdout when a match occurs\n\n \"\"\"\n assert isinstance(lines2, Sequence)\n lines2 = self._getlines(lines2)\n lines1 = self.lines[:]\n nextline = None\n extralines = []\n __tracebackhide__ = True\n for line in lines2:\n nomatchprinted = False\n while lines1:\n nextline = lines1.pop(0)\n if line == nextline:\n self._log(\"exact match:\", repr(line))\n break\n elif match_func(nextline, line):\n self._log(\"%s:\" % match_nickname, repr(line))\n self._log(\" with:\", repr(nextline))\n break\n else:\n if not nomatchprinted:\n self._log(\"nomatch:\", repr(line))\n nomatchprinted = True\n self._log(\" and:\", repr(nextline))\n extralines.append(nextline)\n else:\n self._log(\"remains unmatched: %r\" % (line,))\n pytest.fail(self._log_text)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__Python_test_discovery_pyobj_property.return.property_get_None_None_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__Python_test_discovery_pyobj_property.return.property_get_None_None_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 61, "span_ids": ["imports", "pyobj_property", "docstring"], "tokens": 422}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" Python test discovery, setup and run of test functions. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport fnmatch\nimport inspect\nimport os\nimport sys\nimport warnings\nfrom functools import partial\nfrom textwrap import dedent\n\nimport py\nimport six\n\nimport _pytest\nfrom _pytest import deprecated\nfrom _pytest import fixtures\nfrom _pytest import nodes\nfrom _pytest._code import filter_traceback\nfrom _pytest.compat import ascii_escaped\nfrom _pytest.compat import enum\nfrom _pytest.compat import get_default_arg_names\nfrom _pytest.compat import get_real_func\nfrom _pytest.compat import getfslineno\nfrom _pytest.compat import getimfunc\nfrom _pytest.compat import getlocation\nfrom _pytest.compat import is_generator\nfrom _pytest.compat import isclass\nfrom _pytest.compat import isfunction\nfrom _pytest.compat import NoneType\nfrom _pytest.compat import NOTSET\nfrom _pytest.compat import REGEX_TYPE\nfrom _pytest.compat import safe_getattr\nfrom _pytest.compat import safe_isclass\nfrom _pytest.compat import safe_str\nfrom _pytest.compat import STRING_TYPES\nfrom _pytest.config import hookimpl\nfrom _pytest.main import FSHookProxy\nfrom _pytest.mark import MARK_GEN\nfrom _pytest.mark.structures import get_unpacked_marks\nfrom _pytest.mark.structures import normalize_mark_list\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import skip\nfrom _pytest.pathlib import parts\nfrom _pytest.warning_types import PytestCollectionWarning\nfrom _pytest.warning_types import PytestUnhandledCoroutineWarning\n\n\ndef pyobj_property(name):\n def get(self):\n node = self.getparent(getattr(__import__(\"pytest\"), name))\n if node is not None:\n return node.obj\n\n doc = \"python %s object this node was collected from (can be None).\" % (\n name.lower(),\n )\n return property(get, None, None, doc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_addoption_pytest_addoption.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_addoption_pytest_addoption.None_7", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 64, "end_line": 121, "span_ids": ["pytest_addoption"], "tokens": 375}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--fixtures\",\n \"--funcargs\",\n action=\"store_true\",\n dest=\"showfixtures\",\n default=False,\n help=\"show available fixtures, sorted by plugin appearance \"\n \"(fixtures with leading '_' are only shown 
with '-v')\",\n )\n group.addoption(\n \"--fixtures-per-test\",\n action=\"store_true\",\n dest=\"show_fixtures_per_test\",\n default=False,\n help=\"show fixtures per test\",\n )\n parser.addini(\n \"usefixtures\",\n type=\"args\",\n default=[],\n help=\"list of default fixtures to be used with this project\",\n )\n parser.addini(\n \"python_files\",\n type=\"args\",\n default=[\"test_*.py\", \"*_test.py\"],\n help=\"glob-style file patterns for Python test module discovery\",\n )\n parser.addini(\n \"python_classes\",\n type=\"args\",\n default=[\"Test\"],\n help=\"prefixes or glob names for Python test class discovery\",\n )\n parser.addini(\n \"python_functions\",\n type=\"args\",\n default=[\"test\"],\n help=\"prefixes or glob names for Python test function and method discovery\",\n )\n parser.addini(\n \"disable_test_id_escaping_and_forfeit_all_rights_to_community_support\",\n type=\"bool\",\n default=False,\n help=\"disable string escape non-ascii characters, might cause unwanted \"\n \"side effects(use at your own risk)\",\n )\n\n group.addoption(\n \"--import-mode\",\n default=\"prepend\",\n choices=[\"prepend\", \"append\"],\n dest=\"importmode\",\n help=\"prepend/append to sys.path when importing test modules, \"\n \"default is to prepend.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_cmdline_main_pytest_generate_tests.for_marker_in_metafunc_de.metafunc_parametrize_mar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_cmdline_main_pytest_generate_tests.for_marker_in_metafunc_de.metafunc_parametrize_mar", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 124, "end_line": 142, "span_ids": ["pytest_cmdline_main", "pytest_generate_tests"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_cmdline_main(config):\n if config.option.showfixtures:\n showfixtures(config)\n return 0\n if config.option.show_fixtures_per_test:\n show_fixtures_per_test(config)\n return 0\n\n\ndef pytest_generate_tests(metafunc):\n # those alternative spellings are common - raise a specific error to alert\n # the user\n alt_spellings = [\"parameterize\", \"parametrise\", \"parameterise\"]\n for mark_name in alt_spellings:\n if metafunc.definition.get_closest_marker(mark_name):\n msg = \"{0} has '{1}' mark, spelling should be 'parametrize'\"\n fail(msg.format(metafunc.function.__name__, mark_name), pytrace=False)\n for marker in metafunc.definition.iter_markers(name=\"parametrize\"):\n metafunc.parametrize(*marker.args, **marker.kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_configure_pytest_configure.None_1": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_configure_pytest_configure.None_1", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 145, "end_line": 162, "span_ids": ["pytest_configure"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"parametrize(argnames, argvalues): call a test function multiple \"\n \"times passing in different arguments in turn. argvalues generally \"\n \"needs to be a list of values if argnames specifies only one name \"\n \"or a list of tuples of values if argnames specifies multiple names. \"\n \"Example: @parametrize('arg1', [1,2]) would lead to two calls of the \"\n \"decorated test function, one with arg1=1 and another with arg1=2.\"\n \"see https://docs.pytest.org/en/latest/parametrize.html for more info \"\n \"and examples.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"usefixtures(fixturename1, fixturename2, ...): mark tests as needing \"\n \"all of the specified fixtures. see \"\n \"https://docs.pytest.org/en/latest/fixture.html#usefixtures \",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pyfunc_call_pytest_pyfunc_call.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pyfunc_call_pytest_pyfunc_call.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 165, "end_line": 180, "span_ids": ["pytest_pyfunc_call"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookimpl(trylast=True)\ndef pytest_pyfunc_call(pyfuncitem):\n testfunction = pyfuncitem.obj\n iscoroutinefunction = getattr(inspect, \"iscoroutinefunction\", None)\n if iscoroutinefunction is not None and iscoroutinefunction(testfunction):\n msg = \"Coroutine functions are not natively supported and have been skipped.\\n\"\n msg += \"You need to install a suitable plugin for your async framework, for example:\\n\"\n msg += \" - pytest-asyncio\\n\"\n msg += \" - pytest-trio\\n\"\n msg += \" - pytest-tornasync\"\n warnings.warn(PytestUnhandledCoroutineWarning(msg.format(pyfuncitem.nodeid)))\n skip(msg=\"coroutine function and no async plugin installed (see warnings)\")\n funcargs = pyfuncitem.funcargs\n testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}\n testfunction(**testargs)\n return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_collect_file_pytest_pycollect_makemodule.return.Module_path_parent_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_collect_file_pytest_pycollect_makemodule.return.Module_path_parent_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 183, "end_line": 203, "span_ids": ["path_matches_patterns", "pytest_pycollect_makemodule", "pytest_collect_file"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_collect_file(path, parent):\n ext = path.ext\n if ext == \".py\":\n if not parent.session.isinitpath(path):\n if not path_matches_patterns(\n path, parent.config.getini(\"python_files\") + [\"__init__.py\"]\n ):\n return\n ihook = parent.session.gethookproxy(path)\n return ihook.pytest_pycollect_makemodule(path=path, parent=parent)\n\n\ndef path_matches_patterns(path, patterns):\n \"\"\"Returns True if the given py.path.local matches one of the patterns in the list of globs given\"\"\"\n return any(path.fnmatch(pattern) for pattern in patterns)\n\n\ndef pytest_pycollect_makemodule(path, parent):\n if path.basename == \"__init__.py\":\n return Package(path, parent)\n return Module(path, parent)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pycollect_makeitem_pytest_pycollect_makeitem.if_safe_isclass_obj_.elif_collector_istestfunc.if_not_isfunction_obj_o.elif_getattr_obj___test.outcome_force_result_res_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_pycollect_makeitem_pytest_pycollect_makeitem.if_safe_isclass_obj_.elif_collector_istestfunc.if_not_isfunction_obj_o.elif_getattr_obj___test.outcome_force_result_res_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 206, "end_line": 240, "span_ids": ["pytest_pycollect_makeitem"], "tokens": 334}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookimpl(hookwrapper=True)\ndef pytest_pycollect_makeitem(collector, name, obj):\n outcome = yield\n res = outcome.get_result()\n if res is not None:\n return\n # nothing was collected elsewhere, let's do it here\n if safe_isclass(obj):\n if collector.istestclass(obj, name):\n outcome.force_result(Class(name, parent=collector))\n elif collector.istestfunction(obj, name):\n # mock seems to store unbound methods (issue473), normalize it\n obj = getattr(obj, \"__func__\", obj)\n # We need to try and unwrap the function if it's a functools.partial\n # or a funtools.wrapped.\n # We musn't if it's been wrapped with mock.patch (python 2 
only)\n if not (isfunction(obj) or isfunction(get_real_func(obj))):\n filename, lineno = getfslineno(obj)\n warnings.warn_explicit(\n message=PytestCollectionWarning(\n \"cannot collect %r because it is not a function.\" % name\n ),\n category=None,\n filename=str(filename),\n lineno=lineno + 1,\n )\n elif getattr(obj, \"__test__\", True):\n if is_generator(obj):\n res = Function(name, parent=collector)\n reason = deprecated.YIELD_TESTS.format(name=name)\n res.add_marker(MARK_GEN.xfail(run=False, reason=reason))\n res.warn(PytestCollectionWarning(reason))\n else:\n res = list(collector._genfunctions(name, obj))\n outcome.force_result(res)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_make_parametrize_id_PyobjMixin._getobj.return.getattr_self_parent_obj_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_pytest_make_parametrize_id_PyobjMixin._getobj.return.getattr_self_parent_obj_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 243, "end_line": 277, "span_ids": ["PyobjContext", "pytest_make_parametrize_id", "PyobjMixin.obj", "PyobjMixin._getobj", "PyobjMixin.obj_1", "PyobjMixin"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_make_parametrize_id(config, val, argname=None):\n return None\n\n\nclass PyobjContext(object):\n module = pyobj_property(\"Module\")\n cls = pyobj_property(\"Class\")\n instance = pyobj_property(\"Instance\")\n\n\nclass PyobjMixin(PyobjContext):\n _ALLOW_MARKERS = True\n\n def __init__(self, *k, **kw):\n super(PyobjMixin, self).__init__(*k, **kw)\n\n @property\n def obj(self):\n \"\"\"Underlying Python object.\"\"\"\n obj = getattr(self, \"_obj\", None)\n if obj is None:\n self._obj = obj = self._getobj()\n # XXX evil hack\n # used to avoid Instance collector marker duplication\n if self._ALLOW_MARKERS:\n self.own_markers.extend(get_unpacked_marks(self.obj))\n return obj\n\n @obj.setter\n def obj(self, value):\n self._obj = value\n\n def _getobj(self):\n \"\"\"Gets the underlying Python object. 
May be overwritten by subclasses.\"\"\"\n return getattr(self.parent.obj, self.name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.getmodpath_PyobjMixin.getmodpath.return.s_replace_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.getmodpath_PyobjMixin.getmodpath.return.s_replace_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 279, "end_line": 297, "span_ids": ["PyobjMixin.getmodpath"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyobjMixin(PyobjContext):\n\n def getmodpath(self, stopatmodule=True, includemodule=False):\n \"\"\" return python path relative to the containing module. \"\"\"\n chain = self.listchain()\n chain.reverse()\n parts = []\n for node in chain:\n if isinstance(node, Instance):\n continue\n name = node.name\n if isinstance(node, Module):\n name = os.path.splitext(name)[0]\n if stopatmodule:\n if includemodule:\n parts.append(name)\n break\n parts.append(name)\n parts.reverse()\n s = \".\".join(parts)\n return s.replace(\".[\", \"[\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.reportinfo_PyobjMixin.reportinfo.return.fspath_lineno_modpath": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyobjMixin.reportinfo_PyobjMixin.reportinfo.return.fspath_lineno_modpath", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 299, "end_line": 313, "span_ids": ["PyobjMixin.reportinfo"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyobjMixin(PyobjContext):\n\n def reportinfo(self):\n # XXX caching?\n obj = self.obj\n compat_co_firstlineno = getattr(obj, \"compat_co_firstlineno\", None)\n if isinstance(compat_co_firstlineno, int):\n # nose compatibility\n fspath = sys.modules[obj.__module__].__file__\n if fspath.endswith(\".pyc\"):\n fspath = fspath[:-1]\n lineno = compat_co_firstlineno\n else:\n fspath, lineno = getfslineno(obj)\n modpath = self.getmodpath()\n assert isinstance(lineno, int)\n return fspath, lineno, modpath", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector_PyCollector.istestclass.return.self_classnamefilter_name": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector_PyCollector.istestclass.return.self_classnamefilter_name", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 316, "end_line": 345, "span_ids": ["PyCollector.istestclass", "PyCollector.isnosetest", "PyCollector", "PyCollector.classnamefilter", "PyCollector.funcnamefilter", "PyCollector.istestfunction"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyCollector(PyobjMixin, nodes.Collector):\n def funcnamefilter(self, name):\n return self._matches_prefix_or_glob_option(\"python_functions\", name)\n\n def isnosetest(self, obj):\n \"\"\" Look for the __test__ attribute, which is applied by the\n @nose.tools.istest decorator\n \"\"\"\n # We explicitly check for \"is True\" here to not mistakenly treat\n # classes with a custom __getattr__ returning something truthy (like a\n # function) as test classes.\n return safe_getattr(obj, \"__test__\", False) is True\n\n def classnamefilter(self, name):\n return self._matches_prefix_or_glob_option(\"python_classes\", name)\n\n def istestfunction(self, obj, name):\n if self.funcnamefilter(name) or self.isnosetest(obj):\n if isinstance(obj, staticmethod):\n # static methods need to be unwrapped\n obj = safe_getattr(obj, \"__func__\", False)\n return (\n safe_getattr(obj, \"__call__\", False)\n and fixtures.getfixturemarker(obj) is None\n )\n else:\n return False\n\n def istestclass(self, obj, name):\n return self.classnamefilter(name) or self.isnosetest(obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._matches_prefix_or_glob_option_PyCollector._matches_prefix_or_glob_option.return.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._matches_prefix_or_glob_option_PyCollector._matches_prefix_or_glob_option.return.False", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 347, "end_line": 362, "span_ids": ["PyCollector._matches_prefix_or_glob_option"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyCollector(PyobjMixin, nodes.Collector):\n\n def _matches_prefix_or_glob_option(self, option_name, name):\n \"\"\"\n checks if the given name matches the prefix or glob-pattern defined\n in ini configuration.\n \"\"\"\n for option in self.config.getini(option_name):\n if name.startswith(option):\n return True\n # check that name looks like a glob-string before calling fnmatch\n # because this is called for every name in each collected module,\n # and fnmatch is somewhat 
expensive to call\n elif (\"*\" in option or \"?\" in option or \"[\" in option) and fnmatch.fnmatch(\n name, option\n ):\n return True\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector.collect_PyCollector._makeitem.return.self_ihook_pytest_pycolle": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector.collect_PyCollector._makeitem.return.self_ihook_pytest_pycolle", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 391, "span_ids": ["PyCollector._makeitem", "PyCollector.collect"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyCollector(PyobjMixin, nodes.Collector):\n\n def collect(self):\n if not getattr(self.obj, \"__test__\", True):\n return []\n\n # NB. we avoid random getattrs and peek in the __dict__ instead\n # (XXX originally introduced from a PyPy need, still true?)\n dicts = [getattr(self.obj, \"__dict__\", {})]\n for basecls in inspect.getmro(self.obj.__class__):\n dicts.append(basecls.__dict__)\n seen = {}\n values = []\n for dic in dicts:\n for name, obj in list(dic.items()):\n if name in seen:\n continue\n seen[name] = True\n res = self._makeitem(name, obj)\n if res is None:\n continue\n if not isinstance(res, list):\n res = [res]\n values.extend(res)\n values.sort(key=lambda item: item.reportinfo()[:2])\n return values\n\n def _makeitem(self, name, obj):\n # assert self.ihook.fspath == self.fspath, self\n return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._genfunctions_PyCollector._genfunctions.if_not_metafunc__calls_.else_.for_callspec_in_metafunc_.yield_Function_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_PyCollector._genfunctions_PyCollector._genfunctions.if_not_metafunc__calls_.else_.for_callspec_in_metafunc_.yield_Function_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 393, "end_line": 438, "span_ids": ["PyCollector._genfunctions"], "tokens": 394}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PyCollector(PyobjMixin, nodes.Collector):\n\n def _genfunctions(self, name, funcobj):\n module = self.getparent(Module).obj\n clscol = self.getparent(Class)\n cls = clscol and clscol.obj or None\n fm = self.session._fixturemanager\n\n definition = 
FunctionDefinition(name=name, parent=self, callobj=funcobj)\n fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls)\n\n metafunc = Metafunc(\n definition, fixtureinfo, self.config, cls=cls, module=module\n )\n methods = []\n if hasattr(module, \"pytest_generate_tests\"):\n methods.append(module.pytest_generate_tests)\n if hasattr(cls, \"pytest_generate_tests\"):\n methods.append(cls().pytest_generate_tests)\n if methods:\n self.ihook.pytest_generate_tests.call_extra(\n methods, dict(metafunc=metafunc)\n )\n else:\n self.ihook.pytest_generate_tests(metafunc=metafunc)\n\n if not metafunc._calls:\n yield Function(name, parent=self, fixtureinfo=fixtureinfo)\n else:\n # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs\n fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)\n\n # add_funcarg_pseudo_fixture_def may have shadowed some fixtures\n # with direct parametrization, so make sure we update what the\n # function really needs.\n fixtureinfo.prune_dependency_tree()\n\n for callspec in metafunc._calls:\n subname = \"%s[%s]\" % (name, callspec.id)\n yield Function(\n name=subname,\n parent=self,\n callspec=callspec,\n callobj=funcobj,\n fixtureinfo=fixtureinfo,\n keywords={callspec.id: True},\n originalname=name,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module_Module._inject_setup_module_fixture.self.obj.__pytest_setup_module.xunit_setup_module_fixtur": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module_Module._inject_setup_module_fixture.self.obj.__pytest_setup_module.xunit_setup_module_fixtur", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 441, "end_line": 479, "span_ids": ["Module.collect", "Module", "Module._inject_setup_module_fixture", "Module._getobj"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Module(nodes.File, PyCollector):\n \"\"\" Collector for test classes and functions. 
\"\"\"\n\n def _getobj(self):\n return self._importtestmodule()\n\n def collect(self):\n self._inject_setup_module_fixture()\n self._inject_setup_function_fixture()\n self.session._fixturemanager.parsefactories(self)\n return super(Module, self).collect()\n\n def _inject_setup_module_fixture(self):\n \"\"\"Injects a hidden autouse, module scoped fixture into the collected module object\n that invokes setUpModule/tearDownModule if either or both are available.\n\n Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with\n other fixtures (#517).\n \"\"\"\n setup_module = _get_non_fixture_func(self.obj, \"setUpModule\")\n if setup_module is None:\n setup_module = _get_non_fixture_func(self.obj, \"setup_module\")\n\n teardown_module = _get_non_fixture_func(self.obj, \"tearDownModule\")\n if teardown_module is None:\n teardown_module = _get_non_fixture_func(self.obj, \"teardown_module\")\n\n if setup_module is None and teardown_module is None:\n return\n\n @fixtures.fixture(autouse=True, scope=\"module\")\n def xunit_setup_module_fixture(request):\n if setup_module is not None:\n _call_with_optional_argument(setup_module, request.module)\n yield\n if teardown_module is not None:\n _call_with_optional_argument(teardown_module, request.module)\n\n self.obj.__pytest_setup_module = xunit_setup_module_fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._inject_setup_function_fixture_Module._inject_setup_function_fixture.self.obj.__pytest_setup_function.xunit_setup_function_fixt": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._inject_setup_function_fixture_Module._inject_setup_function_fixture.self.obj.__pytest_setup_function.xunit_setup_function_fixt", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 481, "end_line": 506, "span_ids": ["Module._inject_setup_function_fixture"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Module(nodes.File, PyCollector):\n\n def _inject_setup_function_fixture(self):\n \"\"\"Injects a hidden autouse, function scoped fixture into the collected module object\n that invokes setup_function/teardown_function if either or both are available.\n\n Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with\n other fixtures (#517).\n \"\"\"\n setup_function = _get_non_fixture_func(self.obj, \"setup_function\")\n teardown_function = _get_non_fixture_func(self.obj, \"teardown_function\")\n if setup_function is None and teardown_function is None:\n return\n\n @fixtures.fixture(autouse=True, scope=\"function\")\n def xunit_setup_function_fixture(request):\n if request.instance is not None:\n # in this case we are bound to an instance, so we need to let\n # setup_method handle this\n yield\n return\n if setup_function is not None:\n _call_with_optional_argument(setup_function, request.function)\n yield\n if teardown_function is not None:\n 
_call_with_optional_argument(teardown_function, request.function)\n\n self.obj.__pytest_setup_function = xunit_setup_function_fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._importtestmodule_Module._importtestmodule.return.mod": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Module._importtestmodule_Module._importtestmodule.return.mod", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 508, "end_line": 556, "span_ids": ["Module._importtestmodule"], "tokens": 469}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Module(nodes.File, PyCollector):\n\n def _importtestmodule(self):\n # we assume we are only called once per module\n importmode = self.config.getoption(\"--import-mode\")\n try:\n mod = self.fspath.pyimport(ensuresyspath=importmode)\n except SyntaxError:\n raise self.CollectError(\n _pytest._code.ExceptionInfo.from_current().getrepr(style=\"short\")\n )\n except self.fspath.ImportMismatchError:\n e = sys.exc_info()[1]\n raise self.CollectError(\n \"import file mismatch:\\n\"\n \"imported module %r has this __file__ attribute:\\n\"\n \" %s\\n\"\n \"which is not the same as the test file we want to collect:\\n\"\n \" %s\\n\"\n \"HINT: remove __pycache__ / .pyc files and/or use a \"\n \"unique basename for your test file modules\" % e.args\n )\n except ImportError:\n from _pytest._code.code import ExceptionInfo\n\n exc_info = ExceptionInfo.from_current()\n if self.config.getoption(\"verbose\") < 2:\n exc_info.traceback = exc_info.traceback.filter(filter_traceback)\n exc_repr = (\n exc_info.getrepr(style=\"short\")\n if exc_info.traceback\n else exc_info.exconly()\n )\n formatted_tb = safe_str(exc_repr)\n raise self.CollectError(\n \"ImportError while importing test module '{fspath}'.\\n\"\n \"Hint: make sure your test modules/packages have valid Python names.\\n\"\n \"Traceback:\\n\"\n \"{traceback}\".format(fspath=self.fspath, traceback=formatted_tb)\n )\n except _pytest.runner.Skipped as e:\n if e.allow_module_level:\n raise\n raise self.CollectError(\n \"Using pytest.skip outside of a test is not allowed. 
\"\n \"To decorate a test function, use the @pytest.mark.skip \"\n \"or @pytest.mark.skipif decorators instead, and to skip a \"\n \"module use `pytestmark = pytest.mark.{skip,skipif}.\"\n )\n self.config.pluginmanager.consider_module(mod)\n return mod", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package_Package.setup.if_teardown_module_is_not.self_addfinalizer_func_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package_Package.setup.if_teardown_module_is_not.self_addfinalizer_func_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 559, "end_line": 584, "span_ids": ["Package", "Package.setup"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Package(Module):\n def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):\n session = parent.session\n nodes.FSCollector.__init__(\n self, fspath, parent=parent, config=config, session=session, nodeid=nodeid\n )\n self.name = fspath.dirname\n self.trace = session.trace\n self._norecursepatterns = session._norecursepatterns\n self.fspath = fspath\n\n def setup(self):\n # not using fixtures to call setup_module here because autouse fixtures\n # from packages are not called automatically (#4085)\n setup_module = _get_non_fixture_func(self.obj, \"setUpModule\")\n if setup_module is None:\n setup_module = _get_non_fixture_func(self.obj, \"setup_module\")\n if setup_module is not None:\n _call_with_optional_argument(setup_module, self.obj)\n\n teardown_module = _get_non_fixture_func(self.obj, \"tearDownModule\")\n if teardown_module is None:\n teardown_module = _get_non_fixture_func(self.obj, \"teardown_module\")\n if teardown_module is not None:\n func = partial(_call_with_optional_argument, teardown_module, self.obj)\n self.addfinalizer(func)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._recurse_Package.gethookproxy.return.proxy": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._recurse_Package.gethookproxy.return.proxy", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 586, "end_line": 611, "span_ids": ["Package._recurse", "Package.gethookproxy"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Package(Module):\n\n def _recurse(self, dirpath):\n if dirpath.basename == \"__pycache__\":\n return False\n ihook = 
self.gethookproxy(dirpath.dirpath())\n if ihook.pytest_ignore_collect(path=dirpath, config=self.config):\n return\n for pat in self._norecursepatterns:\n if dirpath.check(fnmatch=pat):\n return False\n ihook = self.gethookproxy(dirpath)\n ihook.pytest_collect_directory(path=dirpath, parent=self)\n return True\n\n def gethookproxy(self, fspath):\n # check if we have the common case of running\n # hooks with all conftest.py filesall conftest.py\n pm = self.config.pluginmanager\n my_conftestmodules = pm._getconftestmodules(fspath)\n remove_mods = pm._conftest_plugins.difference(my_conftestmodules)\n if remove_mods:\n # one or more conftests are not in use at this fspath\n proxy = FSHookProxy(fspath, pm, remove_mods)\n else:\n # all plugis are active for this fspath\n proxy = self.config.hook\n return proxy", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._collectfile_Package.isinitpath.return.path_in_self_session__ini": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package._collectfile_Package.isinitpath.return.path_in_self_session__ini", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 613, "end_line": 640, "span_ids": ["Package._collectfile", "Package.isinitpath"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Package(Module):\n\n def _collectfile(self, path, handle_dupes=True):\n assert path.isfile(), \"%r is not a file (isdir=%r, exists=%r, islink=%r)\" % (\n path,\n path.isdir(),\n path.exists(),\n path.islink(),\n )\n ihook = self.gethookproxy(path)\n if not self.isinitpath(path):\n if ihook.pytest_ignore_collect(path=path, config=self.config):\n return ()\n\n if handle_dupes:\n keepduplicates = self.config.getoption(\"keepduplicates\")\n if not keepduplicates:\n duplicate_paths = self.config.pluginmanager._duplicatepaths\n if path in duplicate_paths:\n return ()\n else:\n duplicate_paths.add(path)\n\n if self.fspath == path: # __init__.py\n return [self]\n\n return ihook.pytest_collect_file(path=path, parent=self)\n\n def isinitpath(self, path):\n return path in self.session._initialpaths", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package.collect_Package.collect.for_path_in_this_path_vis.None_2.elif_path_join___init___.pkg_prefixes_add_path_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Package.collect_Package.collect.for_path_in_this_path_vis.None_2.elif_path_join___init___.pkg_prefixes_add_path_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 642, "end_line": 671, "span_ids": ["Package.collect"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Package(Module):\n\n def collect(self):\n this_path = self.fspath.dirpath()\n init_module = this_path.join(\"__init__.py\")\n if init_module.check(file=1) and path_matches_patterns(\n init_module, self.config.getini(\"python_files\")\n ):\n yield Module(init_module, self)\n pkg_prefixes = set()\n for path in this_path.visit(rec=self._recurse, bf=True, sort=True):\n # We will visit our own __init__.py file, in which case we skip it.\n is_file = path.isfile()\n if is_file:\n if path.basename == \"__init__.py\" and path.dirpath() == this_path:\n continue\n\n parts_ = parts(path.strpath)\n if any(\n pkg_prefix in parts_ and pkg_prefix.join(\"__init__.py\") != path\n for pkg_prefix in pkg_prefixes\n ):\n continue\n\n if is_file:\n for x in self._collectfile(path):\n yield x\n elif not path.isdir():\n # Broken symlink or invalid/missing file.\n continue\n elif path.join(\"__init__.py\").check(file=1):\n pkg_prefixes.add(path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__get_xunit_setup_teardown__get_xunit_setup_teardown.if_result_is_not_None_.if_arg_count_.else_.return.result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__get_xunit_setup_teardown__get_xunit_setup_teardown.if_result_is_not_None_.if_arg_count_.else_.return.result", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 674, "end_line": 692, "span_ids": ["_get_xunit_setup_teardown"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):\n \"\"\"\n Return a callable to perform xunit-style setup or teardown if\n the function exists in the ``holder`` object.\n The ``param_obj`` parameter is the parameter which will be passed to the function\n when the callable is called without arguments, defaults to the ``holder`` object.\n Return ``None`` if a suitable callable is not found.\n \"\"\"\n # TODO: only needed because of Package!\n param_obj = param_obj if param_obj is not None else holder\n result = _get_non_fixture_func(holder, attr_name)\n if result is not None:\n arg_count = result.__code__.co_argcount\n if inspect.ismethod(result):\n arg_count -= 1\n if arg_count:\n return lambda: result(param_obj)\n else:\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__call_with_optional_argument__get_non_fixture_func.if_fixtures_getfixturemar.return.meth": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__call_with_optional_argument__get_non_fixture_func.if_fixtures_getfixturemar.return.meth", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 695, "end_line": 714, "span_ids": ["_get_non_fixture_func", "_call_with_optional_argument"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _call_with_optional_argument(func, arg):\n \"\"\"Call the given function with the given argument if func accepts one argument, otherwise\n calls func without arguments\"\"\"\n arg_count = func.__code__.co_argcount\n if inspect.ismethod(func):\n arg_count -= 1\n if arg_count:\n func(arg)\n else:\n func()\n\n\ndef _get_non_fixture_func(obj, name):\n \"\"\"Return the attribute from the given object to be used as a setup/teardown\n xunit-style function, but only if not marked as a fixture to\n avoid calling it twice.\n \"\"\"\n meth = getattr(obj, name, None)\n if fixtures.getfixturemarker(meth) is None:\n return meth", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class_Class.collect.return._Instance_name_pare": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class_Class.collect.return._Instance_name_pare", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 717, "end_line": 743, "span_ids": ["Class", "Class.collect"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Class(PyCollector):\n \"\"\" Collector for test methods. 
\"\"\"\n\n def collect(self):\n if not safe_getattr(self.obj, \"__test__\", True):\n return []\n if hasinit(self.obj):\n self.warn(\n PytestCollectionWarning(\n \"cannot collect test class %r because it has a \"\n \"__init__ constructor\" % self.obj.__name__\n )\n )\n return []\n elif hasnew(self.obj):\n self.warn(\n PytestCollectionWarning(\n \"cannot collect test class %r because it has a \"\n \"__new__ constructor\" % self.obj.__name__\n )\n )\n return []\n\n self._inject_setup_class_fixture()\n self._inject_setup_method_fixture()\n\n return [Instance(name=\"()\", parent=self)]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_class_fixture_Class._inject_setup_class_fixture.self.obj.__pytest_setup_class.xunit_setup_class_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_class_fixture_Class._inject_setup_class_fixture.self.obj.__pytest_setup_class.xunit_setup_class_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 745, "end_line": 767, "span_ids": ["Class._inject_setup_class_fixture"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Class(PyCollector):\n\n def _inject_setup_class_fixture(self):\n \"\"\"Injects a hidden autouse, class scoped fixture into the collected class object\n that invokes setup_class/teardown_class if either or both are available.\n\n Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with\n other fixtures (#517).\n \"\"\"\n setup_class = _get_non_fixture_func(self.obj, \"setup_class\")\n teardown_class = getattr(self.obj, \"teardown_class\", None)\n if setup_class is None and teardown_class is None:\n return\n\n @fixtures.fixture(autouse=True, scope=\"class\")\n def xunit_setup_class_fixture(cls):\n if setup_class is not None:\n func = getimfunc(setup_class)\n _call_with_optional_argument(func, self.obj)\n yield\n if teardown_class is not None:\n func = getimfunc(teardown_class)\n _call_with_optional_argument(func, self.obj)\n\n self.obj.__pytest_setup_class = xunit_setup_class_fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_method_fixture_Instance.newinstance.return.self_obj": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Class._inject_setup_method_fixture_Instance.newinstance.return.self_obj", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 769, "end_line": 810, "span_ids": ["Class._inject_setup_method_fixture", "Instance._getobj", "Instance.collect", "Instance.newinstance", "Instance"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Class(PyCollector):\n\n def _inject_setup_method_fixture(self):\n \"\"\"Injects a hidden autouse, function scoped fixture into the collected class object\n that invokes setup_method/teardown_method if either or both are available.\n\n Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with\n other fixtures (#517).\n \"\"\"\n setup_method = _get_non_fixture_func(self.obj, \"setup_method\")\n teardown_method = getattr(self.obj, \"teardown_method\", None)\n if setup_method is None and teardown_method is None:\n return\n\n @fixtures.fixture(autouse=True, scope=\"function\")\n def xunit_setup_method_fixture(self, request):\n method = request.function\n if setup_method is not None:\n func = getattr(self, \"setup_method\")\n _call_with_optional_argument(func, method)\n yield\n if teardown_method is not None:\n func = getattr(self, \"teardown_method\")\n _call_with_optional_argument(func, method)\n\n self.obj.__pytest_setup_method = xunit_setup_method_fixture\n\n\nclass Instance(PyCollector):\n _ALLOW_MARKERS = False # hack, destroy later\n # instances share the object with their parents in a way\n # that duplicates markers instances if not taken out\n # can be removed at node structure reorganization time\n\n def _getobj(self):\n return self.parent.obj()\n\n def collect(self):\n self.session._fixturemanager.parsefactories(self)\n return super(Instance, self).collect()\n\n def newinstance(self):\n self.obj = self._getobj()\n return self.obj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_FunctionMixin_FunctionMixin.repr_failure.return.self__repr_failure_py_exc": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_FunctionMixin_FunctionMixin.repr_failure.return.self__repr_failure_py_exc", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 813, "end_line": 849, "span_ids": ["FunctionMixin.setup", "FunctionMixin", "FunctionMixin.repr_failure", "FunctionMixin._prunetraceback"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FunctionMixin(PyobjMixin):\n \"\"\" mixin for the code common to Function and Generator.\n \"\"\"\n\n def setup(self):\n \"\"\" perform setup for this test function. 
\"\"\"\n if isinstance(self.parent, Instance):\n self.parent.newinstance()\n self.obj = self._getobj()\n\n def _prunetraceback(self, excinfo):\n if hasattr(self, \"_obj\") and not self.config.getoption(\"fulltrace\", False):\n code = _pytest._code.Code(get_real_func(self.obj))\n path, firstlineno = code.path, code.firstlineno\n traceback = excinfo.traceback\n ntraceback = traceback.cut(path=path, firstlineno=firstlineno)\n if ntraceback == traceback:\n ntraceback = ntraceback.cut(path=path)\n if ntraceback == traceback:\n ntraceback = ntraceback.filter(filter_traceback)\n if not ntraceback:\n ntraceback = traceback\n\n excinfo.traceback = ntraceback.filter()\n # issue364: mark all but first and last frames to\n # only show a single-line message for each frame\n if self.config.getoption(\"tbstyle\", \"auto\") == \"auto\":\n if len(excinfo.traceback) > 2:\n for entry in excinfo.traceback[1:-1]:\n entry.set_repr_style(\"short\")\n\n def repr_failure(self, excinfo, outerr=None):\n assert outerr is None, \"XXX outerr usage is deprecated\"\n style = self.config.getoption(\"tbstyle\", \"auto\")\n if style == \"auto\":\n style = \"long\"\n return self._repr_failure_py(excinfo, style=style)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_hasinit_CallSpec2.id.return._join_map_str_filter_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_hasinit_CallSpec2.id.return._join_map_str_filter_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 852, "end_line": 902, "span_ids": ["CallSpec2._checkargnotcontained", "CallSpec2.copy", "hasnew", "CallSpec2.id", "hasinit", "CallSpec2", "CallSpec2.getparam"], "tokens": 358}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def hasinit(obj):\n init = getattr(obj, \"__init__\", None)\n if init:\n return init != object.__init__\n\n\ndef hasnew(obj):\n new = getattr(obj, \"__new__\", None)\n if new:\n return new != object.__new__\n\n\nclass CallSpec2(object):\n def __init__(self, metafunc):\n self.metafunc = metafunc\n self.funcargs = {}\n self._idlist = []\n self.params = {}\n self._globalid = NOTSET\n self._globalparam = NOTSET\n self._arg2scopenum = {} # used for sorting parametrized resources\n self.marks = []\n self.indices = {}\n\n def copy(self):\n cs = CallSpec2(self.metafunc)\n cs.funcargs.update(self.funcargs)\n cs.params.update(self.params)\n cs.marks.extend(self.marks)\n cs.indices.update(self.indices)\n cs._arg2scopenum.update(self._arg2scopenum)\n cs._idlist = list(self._idlist)\n cs._globalid = self._globalid\n cs._globalparam = self._globalparam\n return cs\n\n def _checkargnotcontained(self, arg):\n if arg in self.params or arg in self.funcargs:\n raise ValueError(\"duplicate %r\" % (arg,))\n\n def getparam(self, name):\n try:\n return self.params[name]\n except KeyError:\n if self._globalparam is NOTSET:\n raise ValueError(name)\n return self._globalparam\n\n @property\n def id(self):\n return \"-\".join(map(str, filter(None, 
self._idlist)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_CallSpec2.setmulti2_CallSpec2.setall.for_arg_in_funcargs_.self__arg2scopenum_arg_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_CallSpec2.setmulti2_CallSpec2.setall.for_arg_in_funcargs_.self__arg2scopenum_arg_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 904, "end_line": 924, "span_ids": ["CallSpec2.setall", "CallSpec2.setmulti2"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CallSpec2(object):\n\n def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index):\n for arg, val in zip(argnames, valset):\n self._checkargnotcontained(arg)\n valtype_for_arg = valtypes[arg]\n getattr(self, valtype_for_arg)[arg] = val\n self.indices[arg] = param_index\n self._arg2scopenum[arg] = scopenum\n self._idlist.append(id)\n self.marks.extend(normalize_mark_list(marks))\n\n def setall(self, funcargs, id, param):\n for x in funcargs:\n self._checkargnotcontained(x)\n self.funcargs.update(funcargs)\n if id is not NOTSET:\n self._idlist.append(id)\n if param is not NOTSET:\n assert self._globalparam is NOTSET\n self._globalparam = param\n for arg in funcargs:\n self._arg2scopenum[arg] = fixtures.scopenum_function", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc_Metafunc.__init__.self._arg2fixturedefs.fixtureinfo_name2fixtured": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc_Metafunc.__init__.self._arg2fixturedefs.fixtureinfo_name2fixtured", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 927, "end_line": 959, "span_ids": ["Metafunc"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Metafunc(fixtures.FuncargnamesCompatAttr):\n \"\"\"\n Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.\n They help to inspect a test function and to generate tests according to\n test configuration or values specified in the class or module where a\n test function is defined.\n \"\"\"\n\n def __init__(self, definition, fixtureinfo, config, cls=None, module=None):\n assert (\n isinstance(definition, FunctionDefinition)\n or type(definition).__name__ == \"DefinitionMock\"\n )\n self.definition = definition\n\n #: access to the :class:`_pytest.config.Config` object 
for the test session\n self.config = config\n\n #: the module object where the test function is defined in.\n self.module = module\n\n #: underlying python test function\n self.function = definition.obj\n\n #: set of fixture names required by the test function\n self.fixturenames = fixtureinfo.names_closure\n\n #: class object where the test function is defined in or ``None``.\n self.cls = cls\n\n self._calls = []\n self._ids = set()\n self._arg2fixturedefs = fixtureinfo.name2fixturedefs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc.parametrize_Metafunc.parametrize.self._calls.newcalls": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc.parametrize_Metafunc.parametrize.self._calls.newcalls", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 961, "end_line": 1041, "span_ids": ["Metafunc.parametrize"], "tokens": 782}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Metafunc(fixtures.FuncargnamesCompatAttr):\n\n def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None):\n \"\"\" Add new invocations to the underlying test function using the list\n of argvalues for the given argnames. Parametrization is performed\n during the collection phase. If you need to setup expensive resources\n see about setting indirect to do it rather at test setup time.\n\n :arg argnames: a comma-separated string denoting one or more argument\n names, or a list/tuple of argument strings.\n\n :arg argvalues: The list of argvalues determines how often a\n test is invoked with different argument values. If only one\n argname was specified argvalues is a list of values. If N\n argnames were specified, argvalues must be a list of N-tuples,\n where each tuple-element specifies a value for its respective\n argname.\n\n :arg indirect: The list of argnames or boolean. A list of arguments'\n names (subset of argnames). If True the list contains all names from\n the argnames. Each argvalue corresponding to an argname in this list will\n be passed as request.param to its respective argname fixture\n function so that it can perform more expensive setups during the\n setup phase of a test rather than at collection time.\n\n :arg ids: list of string ids, or a callable.\n If strings, each is corresponding to the argvalues so that they are\n part of the test id. If None is given as id of specific test, the\n automatically generated id for that argument will be used.\n If callable, it should take one argument (a single argvalue) and return\n a string or return None. 
If None, the automatically generated id for that\n argument will be used.\n If no ids are provided they will be generated automatically from\n the argvalues.\n\n :arg scope: if specified it denotes the scope of the parameters.\n The scope is used for grouping tests by parameter instances.\n It will also override any fixture-function defined scope, allowing\n to set a dynamic scope using test context or configuration.\n \"\"\"\n from _pytest.fixtures import scope2index\n from _pytest.mark import ParameterSet\n\n argnames, parameters = ParameterSet._for_parametrize(\n argnames,\n argvalues,\n self.function,\n self.config,\n function_definition=self.definition,\n )\n del argvalues\n\n if scope is None:\n scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)\n\n self._validate_if_using_arg_names(argnames, indirect)\n\n arg_values_types = self._resolve_arg_value_types(argnames, indirect)\n\n ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition)\n\n scopenum = scope2index(\n scope, descr=\"parametrize() call in {}\".format(self.function.__name__)\n )\n\n # create the new calls: if we are parametrize() multiple times (by applying the decorator\n # more than once) then we accumulate those calls generating the cartesian product\n # of all calls\n newcalls = []\n for callspec in self._calls or [CallSpec2(self)]:\n for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)):\n newcallspec = callspec.copy()\n newcallspec.setmulti2(\n arg_values_types,\n argnames,\n param_set.values,\n param_id,\n param_set.marks,\n scopenum,\n param_index,\n )\n newcalls.append(newcallspec)\n self._calls = newcalls", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_ids_Metafunc._resolve_arg_ids.return.ids": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_ids_Metafunc._resolve_arg_ids.return.ids", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1043, "end_line": 1073, "span_ids": ["Metafunc._resolve_arg_ids"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Metafunc(fixtures.FuncargnamesCompatAttr):\n\n def _resolve_arg_ids(self, argnames, ids, parameters, item):\n \"\"\"Resolves the actual ids for the given argnames, based on the ``ids`` parameter given\n to ``parametrize``.\n\n :param List[str] argnames: list of argument names passed to ``parametrize()``.\n :param ids: the ids parameter of the parametrized call (see docs).\n :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``.\n :param Item item: the item that generated this parametrized call.\n :rtype: List[str]\n :return: the list of ids for each argname given\n \"\"\"\n from _pytest._io.saferepr import saferepr\n\n idfn = None\n if callable(ids):\n idfn = ids\n ids = None\n if ids:\n func_name = self.function.__name__\n if len(ids) != len(parameters):\n msg = \"In {}: {} 
parameter sets specified, with different number of ids: {}\"\n fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False)\n for id_value in ids:\n if id_value is not None and not isinstance(id_value, six.string_types):\n msg = \"In {}: ids must be list of strings, found: {} (type: {!r})\"\n fail(\n msg.format(func_name, saferepr(id_value), type(id_value)),\n pytrace=False,\n )\n ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item)\n return ids", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_value_types_Metafunc._resolve_arg_value_types.return.valtypes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._resolve_arg_value_types_Metafunc._resolve_arg_value_types.return.valtypes", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1075, "end_line": 1102, "span_ids": ["Metafunc._resolve_arg_value_types"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Metafunc(fixtures.FuncargnamesCompatAttr):\n\n def _resolve_arg_value_types(self, argnames, indirect):\n \"\"\"Resolves if each parametrized argument must be considered a parameter to a fixture or a \"funcarg\"\n to the function, based on the ``indirect`` parameter of the parametrized() call.\n\n :param List[str] argnames: list of argument names passed to ``parametrize()``.\n :param indirect: same ``indirect`` parameter of ``parametrize()``.\n :rtype: Dict[str, str]\n A dict mapping each arg name to either:\n * \"params\" if the argname should be the parameter of a fixture of the same name.\n * \"funcargs\" if the argname should be a parameter to the parametrized test function.\n \"\"\"\n valtypes = {}\n if indirect is True:\n valtypes = dict.fromkeys(argnames, \"params\")\n elif indirect is False:\n valtypes = dict.fromkeys(argnames, \"funcargs\")\n elif isinstance(indirect, (tuple, list)):\n valtypes = dict.fromkeys(argnames, \"funcargs\")\n for arg in indirect:\n if arg not in argnames:\n fail(\n \"In {}: indirect fixture '{}' doesn't exist\".format(\n self.function.__name__, arg\n ),\n pytrace=False,\n )\n valtypes[arg] = \"params\"\n return valtypes", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._validate_if_using_arg_names_Metafunc._validate_if_using_arg_names.for_arg_in_argnames_.if_arg_not_in_self_fixtur.if_arg_in_default_arg_nam.else_.fail_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Metafunc._validate_if_using_arg_names_Metafunc._validate_if_using_arg_names.for_arg_in_argnames_.if_arg_not_in_self_fixtur.if_arg_in_default_arg_nam.else_.fail_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": 
"implementation", "start_line": 1104, "end_line": 1131, "span_ids": ["Metafunc._validate_if_using_arg_names"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Metafunc(fixtures.FuncargnamesCompatAttr):\n\n def _validate_if_using_arg_names(self, argnames, indirect):\n \"\"\"\n Check if all argnames are being used, by default values, or directly/indirectly.\n\n :param List[str] argnames: list of argument names passed to ``parametrize()``.\n :param indirect: same ``indirect`` parameter of ``parametrize()``.\n :raise ValueError: if validation fails.\n \"\"\"\n default_arg_names = set(get_default_arg_names(self.function))\n func_name = self.function.__name__\n for arg in argnames:\n if arg not in self.fixturenames:\n if arg in default_arg_names:\n fail(\n \"In {}: function already takes an argument '{}' with a default value\".format(\n func_name, arg\n ),\n pytrace=False,\n )\n else:\n if isinstance(indirect, (tuple, list)):\n name = \"fixture\" if arg in indirect else \"argument\"\n else:\n name = \"fixture\" if indirect else \"argument\"\n fail(\n \"In {}: function uses no {} '{}'\".format(func_name, name, arg),\n pytrace=False,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__find_parametrized_scope__ascii_escaped_by_config.return.val_if_escape_option_else": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__find_parametrized_scope__ascii_escaped_by_config.return.val_if_escape_option_else", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1134, "end_line": 1174, "span_ids": ["_ascii_escaped_by_config", "_find_parametrized_scope"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):\n \"\"\"Find the most appropriate scope for a parametrized call based on its arguments.\n\n When there's at least one direct argument, always use \"function\" scope.\n\n When a test function is parametrized and all its arguments are indirect\n (e.g. 
fixtures), return the most narrow scope based on the fixtures used.\n\n Related to issue #1832, based on code posted by @Kingdread.\n \"\"\"\n from _pytest.fixtures import scopes\n\n if isinstance(indirect, (list, tuple)):\n all_arguments_are_fixtures = len(indirect) == len(argnames)\n else:\n all_arguments_are_fixtures = bool(indirect)\n\n if all_arguments_are_fixtures:\n fixturedefs = arg2fixturedefs or {}\n used_scopes = [\n fixturedef[0].scope\n for name, fixturedef in fixturedefs.items()\n if name in argnames\n ]\n if used_scopes:\n # Takes the most narrow scope from used fixtures\n for scope in reversed(scopes):\n if scope in used_scopes:\n return scope\n\n return \"function\"\n\n\ndef _ascii_escaped_by_config(val, config):\n if config is None:\n escape_option = False\n else:\n escape_option = config.getini(\n \"disable_test_id_escaping_and_forfeit_all_rights_to_community_support\"\n )\n return val if escape_option else ascii_escaped(val)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idval__idval.return.str_argname_str_idx_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idval__idval.return.str_argname_str_idx_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1177, "end_line": 1207, "span_ids": ["_idval"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _idval(val, argname, idx, idfn, item, config):\n if idfn:\n try:\n generated_id = idfn(val)\n if generated_id is not None:\n val = generated_id\n except Exception as e:\n # See issue https://github.com/pytest-dev/pytest/issues/2169\n msg = \"{}: error raised while trying to determine id of parameter '{}' at position {}\\n\"\n msg = msg.format(item.nodeid, argname, idx)\n # we only append the exception type and message because on Python 2 reraise does nothing\n msg += \" {}: {}\\n\".format(type(e).__name__, e)\n six.raise_from(ValueError(msg), e)\n elif config:\n hook_id = config.hook.pytest_make_parametrize_id(\n config=config, val=val, argname=argname\n )\n if hook_id:\n return hook_id\n\n if isinstance(val, STRING_TYPES):\n return _ascii_escaped_by_config(val, config)\n elif isinstance(val, (float, int, bool, NoneType)):\n return str(val)\n elif isinstance(val, REGEX_TYPE):\n return ascii_escaped(val.pattern)\n elif enum is not None and isinstance(val, enum.Enum):\n return str(val)\n elif (isclass(val) or isfunction(val)) and hasattr(val, \"__name__\"):\n return val.__name__\n return str(argname) + str(idx)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idvalset__idvalset.if_ids_is_None_or_idx_.else_.return.ascii_escaped_ids_idx_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__idvalset__idvalset.if_ids_is_None_or_idx_.else_.return.ascii_escaped_ids_idx_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1210, "end_line": 1220, "span_ids": ["_idvalset"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _idvalset(idx, parameterset, argnames, idfn, ids, item, config):\n if parameterset.id is not None:\n return parameterset.id\n if ids is None or (idx >= len(ids) or ids[idx] is None):\n this_id = [\n _idval(val, argname, idx, idfn, item=item, config=config)\n for val, argname in zip(parameterset.values, argnames)\n ]\n return \"-\".join(this_id)\n else:\n return ascii_escaped(ids[idx])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_idmaker_show_fixtures_per_test.return.wrap_session_config__sho": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_idmaker_show_fixtures_per_test.return.wrap_session_config__sho", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1223, "end_line": 1242, "span_ids": ["show_fixtures_per_test", "idmaker"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):\n ids = [\n _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)\n for valindex, parameterset in enumerate(parametersets)\n ]\n if len(set(ids)) != len(ids):\n # The ids are not unique\n duplicates = [testid for testid in ids if ids.count(testid) > 1]\n counters = collections.defaultdict(lambda: 0)\n for index, testid in enumerate(ids):\n if testid in duplicates:\n ids[index] = testid + str(counters[testid])\n counters[testid] += 1\n return ids\n\n\ndef show_fixtures_per_test(config):\n from _pytest.main import wrap_session\n\n return wrap_session(config, _show_fixtures_per_test)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test__show_fixtures_per_test.write_fixture.if_fixture_doc_.else_.tw_line_no_docstring": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test__show_fixtures_per_test.write_fixture.if_fixture_doc_.else_.tw_line_no_docstring", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 1245, "end_line": 1271, "span_ids": ["_show_fixtures_per_test"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _show_fixtures_per_test(config, session):\n import _pytest.config\n\n session.perform_collect()\n curdir = py.path.local()\n tw = _pytest.config.create_terminal_writer(config)\n verbose = config.getvalue(\"verbose\")\n\n def get_best_relpath(func):\n loc = getlocation(func, curdir)\n return curdir.bestrelpath(loc)\n\n def write_fixture(fixture_def):\n argname = fixture_def.argname\n if verbose <= 0 and argname.startswith(\"_\"):\n return\n if verbose > 0:\n bestrel = get_best_relpath(fixture_def.func)\n funcargspec = \"{} -- {}\".format(argname, bestrel)\n else:\n funcargspec = argname\n tw.line(funcargspec, green=True)\n fixture_doc = fixture_def.func.__doc__\n if fixture_doc:\n write_docstring(tw, fixture_doc)\n else:\n tw.line(\" no docstring available\", red=True)\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test.write_item_showfixtures.return.wrap_session_config__sho": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__show_fixtures_per_test.write_item_showfixtures.return.wrap_session_config__sho", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1273, "end_line": 1300, "span_ids": ["_show_fixtures_per_test", "showfixtures"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _show_fixtures_per_test(config, session):\n # ... 
other code\n\n def write_item(item):\n try:\n info = item._fixtureinfo\n except AttributeError:\n # doctests items have no _fixtureinfo attribute\n return\n if not info.name2fixturedefs:\n # this test item does not use any fixtures\n return\n tw.line()\n tw.sep(\"-\", \"fixtures used by {}\".format(item.name))\n tw.sep(\"-\", \"({})\".format(get_best_relpath(item.function)))\n # dict key not used in loop but needed for sorting\n for _, fixturedefs in sorted(info.name2fixturedefs.items()):\n assert fixturedefs is not None\n if not fixturedefs:\n continue\n # last item is expected to be the one used by the test item\n write_fixture(fixturedefs[-1])\n\n for session_item in session.items:\n write_item(session_item)\n\n\ndef showfixtures(config):\n from _pytest.main import wrap_session\n\n return wrap_session(config, _showfixtures_main)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__showfixtures_main_write_docstring.if_rest_.for_line_in_dedent_rest_.tw_write_indent_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py__showfixtures_main_write_docstring.if_rest_.for_line_in_dedent_rest_.tw_write_indent_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1303, "end_line": 1370, "span_ids": ["_showfixtures_main", "write_docstring"], "tokens": 474}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _showfixtures_main(config, session):\n import _pytest.config\n\n session.perform_collect()\n curdir = py.path.local()\n tw = _pytest.config.create_terminal_writer(config)\n verbose = config.getvalue(\"verbose\")\n\n fm = session._fixturemanager\n\n available = []\n seen = set()\n\n for argname, fixturedefs in fm._arg2fixturedefs.items():\n assert fixturedefs is not None\n if not fixturedefs:\n continue\n for fixturedef in fixturedefs:\n loc = getlocation(fixturedef.func, curdir)\n if (fixturedef.argname, loc) in seen:\n continue\n seen.add((fixturedef.argname, loc))\n available.append(\n (\n len(fixturedef.baseid),\n fixturedef.func.__module__,\n curdir.bestrelpath(loc),\n fixturedef.argname,\n fixturedef,\n )\n )\n\n available.sort()\n currentmodule = None\n for baseid, module, bestrel, argname, fixturedef in available:\n if currentmodule != module:\n if not module.startswith(\"_pytest.\"):\n tw.line()\n tw.sep(\"-\", \"fixtures defined from %s\" % (module,))\n currentmodule = module\n if verbose <= 0 and argname[0] == \"_\":\n continue\n if verbose > 0:\n funcargspec = \"%s -- %s\" % (argname, bestrel)\n else:\n funcargspec = argname\n tw.line(funcargspec, green=True)\n loc = getlocation(fixturedef.func, curdir)\n doc = fixturedef.func.__doc__ or \"\"\n if doc:\n write_docstring(tw, doc)\n else:\n tw.line(\" %s: no docstring available\" % (loc,), red=True)\n\n\ndef write_docstring(tw, doc, indent=\" \"):\n doc = doc.rstrip()\n if \"\\n\" in doc:\n firstline, rest = doc.split(\"\\n\", 1)\n else:\n firstline, rest = doc, \"\"\n\n if firstline.strip():\n tw.line(indent 
+ firstline.strip())\n\n if rest:\n for line in dedent(rest).split(\"\\n\"):\n tw.write(indent + line + \"\\n\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Function_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python.py_Function_", "embedding": null, "metadata": {"file_path": "src/_pytest/python.py", "file_name": "python.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1375, "end_line": 1484, "span_ids": ["Function._initrequest", "Function._getobj", "FunctionDefinition.runtest", "FunctionDefinition:3", "FunctionDefinition", "Function._pyfuncitem", "Function.runtest", "Function.setup", "Function.function", "Function"], "tokens": 700}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):\n \"\"\" a Function Item is responsible for setting up and executing a\n Python test function.\n \"\"\"\n\n # disable since functions handle it themselves\n _ALLOW_MARKERS = False\n\n def __init__(\n self,\n name,\n parent,\n args=None,\n config=None,\n callspec=None,\n callobj=NOTSET,\n keywords=None,\n session=None,\n fixtureinfo=None,\n originalname=None,\n ):\n super(Function, self).__init__(name, parent, config=config, session=session)\n self._args = args\n if callobj is not NOTSET:\n self.obj = callobj\n\n self.keywords.update(self.obj.__dict__)\n self.own_markers.extend(get_unpacked_marks(self.obj))\n if callspec:\n self.callspec = callspec\n # this is total hostile and a mess\n # keywords are broken by design by now\n # this will be redeemed later\n for mark in callspec.marks:\n # feel free to cry, this was broken for years before\n # and keywords cant fix it per design\n self.keywords[mark.name] = mark\n self.own_markers.extend(normalize_mark_list(callspec.marks))\n if keywords:\n self.keywords.update(keywords)\n\n # todo: this is a hell of a hack\n # https://github.com/pytest-dev/pytest/issues/4569\n\n self.keywords.update(\n dict.fromkeys(\n [\n mark.name\n for mark in self.iter_markers()\n if mark.name not in self.keywords\n ],\n True,\n )\n )\n\n if fixtureinfo is None:\n fixtureinfo = self.session._fixturemanager.getfixtureinfo(\n self, self.obj, self.cls, funcargs=True\n )\n self._fixtureinfo = fixtureinfo\n self.fixturenames = fixtureinfo.names_closure\n self._initrequest()\n\n #: original function name, without any decorations (for example\n #: parametrization adds a ``\"[...]\"`` suffix to function names).\n #:\n #: .. 
versionadded:: 3.0\n self.originalname = originalname\n\n def _initrequest(self):\n self.funcargs = {}\n self._request = fixtures.FixtureRequest(self)\n\n @property\n def function(self):\n \"underlying python 'function' object\"\n return getimfunc(self.obj)\n\n def _getobj(self):\n name = self.name\n i = name.find(\"[\") # parametrization\n if i != -1:\n name = name[:i]\n return getattr(self.parent.obj, name)\n\n @property\n def _pyfuncitem(self):\n \"(compatonly) for code expecting pytest-2.2 style request objects\"\n return self\n\n def runtest(self):\n \"\"\" execute the underlying test function. \"\"\"\n self.ihook.pytest_pyfunc_call(pyfuncitem=self)\n\n def setup(self):\n super(Function, self).setup()\n fixtures.fillfixtures(self)\n\n\nclass FunctionDefinition(Function):\n \"\"\"\n internal hack until we get actual definition nodes instead of the\n crappy metafunc hack\n \"\"\"\n\n def runtest(self):\n raise RuntimeError(\"function definitions are not supposed to be used\")\n\n setup = runtest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_from___future___import_ab__non_numeric_type_error.return.TypeError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_from___future___import_ab__non_numeric_type_error.return.TypeError_", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 47, "span_ids": ["imports", "_non_numeric_type_error", "_cmp_raises_type_error", "impl"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\n\nimport math\nimport pprint\nimport sys\nimport warnings\nfrom decimal import Decimal\nfrom numbers import Number\n\nimport six\nfrom more_itertools.more import always_iterable\nfrom six.moves import filterfalse\nfrom six.moves import zip\n\nimport _pytest._code\nfrom _pytest import deprecated\nfrom _pytest.compat import isclass\nfrom _pytest.compat import Iterable\nfrom _pytest.compat import Mapping\nfrom _pytest.compat import Sized\nfrom _pytest.compat import STRING_TYPES\nfrom _pytest.outcomes import fail\n\nBASE_TYPE = (type, STRING_TYPES)\n\n\ndef _cmp_raises_type_error(self, other):\n \"\"\"__cmp__ implementation which raises TypeError. 
Used\n by Approx base classes to implement only == and != and raise a\n TypeError for other comparisons.\n\n Needed in Python 2 only, Python 3 all it takes is not implementing the\n other operators at all.\n \"\"\"\n __tracebackhide__ = True\n raise TypeError(\n \"Comparison operators other than == and != not supported by approx objects\"\n )\n\n\ndef _non_numeric_type_error(value, at):\n at_str = \" at {}\".format(at) if at else \"\"\n return TypeError(\n \"cannot make approximate comparisons to non-numeric values: {!r} {}\".format(\n value, at_str\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_approx_h_ApproxBase._check_type.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_approx_h_ApproxBase._check_type.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 49, "end_line": 105, "span_ids": ["ApproxBase:9", "_non_numeric_type_error", "ApproxBase._yield_comparisons", "ApproxBase", "ApproxBase.__repr__", "ApproxBase._check_type", "ApproxBase.__eq__", "ApproxBase._approx_scalar", "ApproxBase:7", "ApproxBase.__ne__"], "tokens": 411}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# builtin pytest.approx helper\n\n\nclass ApproxBase(object):\n \"\"\"\n Provide shared utilities for making approximate comparisons between numbers\n or sequences of numbers.\n \"\"\"\n\n # Tell numpy to use our `__eq__` operator instead of its.\n __array_ufunc__ = None\n __array_priority__ = 100\n\n def __init__(self, expected, rel=None, abs=None, nan_ok=False):\n __tracebackhide__ = True\n self.expected = expected\n self.abs = abs\n self.rel = rel\n self.nan_ok = nan_ok\n self._check_type()\n\n def __repr__(self):\n raise NotImplementedError\n\n def __eq__(self, actual):\n return all(\n a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)\n )\n\n __hash__ = None\n\n def __ne__(self, actual):\n return not (actual == self)\n\n if sys.version_info[0] == 2:\n __cmp__ = _cmp_raises_type_error\n\n def _approx_scalar(self, x):\n return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)\n\n def _yield_comparisons(self, actual):\n \"\"\"\n Yield all the pairs of numbers to be compared. This is used to\n implement the `__eq__` method.\n \"\"\"\n raise NotImplementedError\n\n def _check_type(self):\n \"\"\"\n Raise a TypeError if the expected value is not a valid type.\n \"\"\"\n # This is only a concern if the expected value is a sequence. In every\n # other case, the approx() function ensures that the expected value has\n # a numeric type. For this reason, the default is to do nothing. 
The\n # classes that deal with sequences should reimplement this method to\n # raise if there are any non-numeric elements in the sequence.\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__recursive_list_map_ApproxNumpy.__eq__.return.ApproxBase___eq___self_a": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__recursive_list_map_ApproxNumpy.__eq__.return.ApproxBase___eq___self_a", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 141, "span_ids": ["_recursive_list_map", "ApproxNumpy", "ApproxNumpy.__repr__", "ApproxNumpy:3", "ApproxNumpy.__eq__"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _recursive_list_map(f, x):\n if isinstance(x, list):\n return list(_recursive_list_map(f, xi) for xi in x)\n else:\n return f(x)\n\n\nclass ApproxNumpy(ApproxBase):\n \"\"\"\n Perform approximate comparisons where the expected value is numpy array.\n \"\"\"\n\n def __repr__(self):\n list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())\n return \"approx({!r})\".format(list_scalars)\n\n if sys.version_info[0] == 2:\n __cmp__ = _cmp_raises_type_error\n\n def __eq__(self, actual):\n import numpy as np\n\n # self.expected is supposed to always be an array here\n\n if not np.isscalar(actual):\n try:\n actual = np.asarray(actual)\n except: # noqa\n raise TypeError(\"cannot compare '{}' to numpy.ndarray\".format(actual))\n\n if not np.isscalar(actual) and actual.shape != self.expected.shape:\n return False\n\n return ApproxBase.__eq__(self, actual)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxNumpy._yield_comparisons_ApproxNumpy._yield_comparisons.if_np_isscalar_actual_.else_.for_i_in_np_ndindex_self_.yield_actual_i_item_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxNumpy._yield_comparisons_ApproxNumpy._yield_comparisons.if_np_isscalar_actual_.else_.for_i_in_np_ndindex_self_.yield_actual_i_item_s", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 143, "end_line": 155, "span_ids": ["ApproxNumpy._yield_comparisons"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxNumpy(ApproxBase):\n\n def _yield_comparisons(self, actual):\n import numpy as np\n\n # `actual` can either be a numpy array or a scalar, it is 
treated in\n # `__eq__` before being passed to `ApproxBase.__eq__`, which is the\n # only method that calls this one.\n\n if np.isscalar(actual):\n for i in np.ndindex(self.expected.shape):\n yield actual, self.expected[i].item()\n else:\n for i in np.ndindex(self.expected.shape):\n yield actual[i].item(), self.expected[i].item()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxMapping_ApproxMapping._check_type.for_key_value_in_self_ex.if_isinstance_value_type.elif_not_isinstance_value.raise__non_numeric_type_e": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxMapping_ApproxMapping._check_type.for_key_value_in_self_ex.if_isinstance_value_type.elif_not_isinstance_value.raise__non_numeric_type_e", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 158, "end_line": 186, "span_ids": ["ApproxMapping", "ApproxMapping.__repr__", "ApproxMapping._yield_comparisons", "ApproxMapping.__eq__", "ApproxMapping._check_type"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxMapping(ApproxBase):\n \"\"\"\n Perform approximate comparisons where the expected value is a mapping with\n numeric values (the keys can be anything).\n \"\"\"\n\n def __repr__(self):\n return \"approx({!r})\".format(\n {k: self._approx_scalar(v) for k, v in self.expected.items()}\n )\n\n def __eq__(self, actual):\n if set(actual.keys()) != set(self.expected.keys()):\n return False\n\n return ApproxBase.__eq__(self, actual)\n\n def _yield_comparisons(self, actual):\n for k in self.expected.keys():\n yield actual[k], self.expected[k]\n\n def _check_type(self):\n __tracebackhide__ = True\n for key, value in self.expected.items():\n if isinstance(value, type(self.expected)):\n msg = \"pytest.approx() does not support nested dictionaries: key={!r} value={!r}\\n full mapping={}\"\n raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))\n elif not isinstance(value, Number):\n raise _non_numeric_type_error(self.expected, at=\"key={!r}\".format(key))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxSequencelike_ApproxSequencelike._check_type.for_index_x_in_enumerate.if_isinstance_x_type_sel.elif_not_isinstance_x_Nu.raise__non_numeric_type_e": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxSequencelike_ApproxSequencelike._check_type.for_index_x_in_enumerate.if_isinstance_x_type_sel.elif_not_isinstance_x_Nu.raise__non_numeric_type_e", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 189, "end_line": 220, "span_ids": ["ApproxSequencelike.__repr__", "ApproxSequencelike.__eq__", 
"ApproxSequencelike", "ApproxSequencelike._yield_comparisons", "ApproxSequencelike._check_type"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxSequencelike(ApproxBase):\n \"\"\"\n Perform approximate comparisons where the expected value is a sequence of\n numbers.\n \"\"\"\n\n def __repr__(self):\n seq_type = type(self.expected)\n if seq_type not in (tuple, list, set):\n seq_type = list\n return \"approx({!r})\".format(\n seq_type(self._approx_scalar(x) for x in self.expected)\n )\n\n def __eq__(self, actual):\n if len(actual) != len(self.expected):\n return False\n return ApproxBase.__eq__(self, actual)\n\n def _yield_comparisons(self, actual):\n return zip(actual, self.expected)\n\n def _check_type(self):\n __tracebackhide__ = True\n for index, x in enumerate(self.expected):\n if isinstance(x, type(self.expected)):\n msg = \"pytest.approx() does not support nested data structures: {!r} at index {}\\n full sequence: {}\"\n raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))\n elif not isinstance(x, Number):\n raise _non_numeric_type_error(\n self.expected, at=\"index {}\".format(index)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar_ApproxScalar.__repr__.if_sys_version_info_0_.else_.return.u_u00b1_format_se": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar_ApproxScalar.__repr__.if_sys_version_info_0_.else_.return.u_u00b1_format_se", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 223, "end_line": 256, "span_ids": ["ApproxScalar", "ApproxScalar.__repr__"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxScalar(ApproxBase):\n \"\"\"\n Perform approximate comparisons where the expected value is a single number.\n \"\"\"\n\n DEFAULT_ABSOLUTE_TOLERANCE = 1e-12\n DEFAULT_RELATIVE_TOLERANCE = 1e-6\n\n def __repr__(self):\n \"\"\"\n Return a string communicating both the expected value and the tolerance\n for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode\n plus/minus symbol if this is python3 (it's too hard to get right for\n python2).\n \"\"\"\n if isinstance(self.expected, complex):\n return str(self.expected)\n\n # Infinities aren't compared using tolerances, so don't show a\n # tolerance.\n if math.isinf(self.expected):\n return str(self.expected)\n\n # If a sensible tolerance can't be calculated, self.tolerance will\n # raise a ValueError. 
In this case, display '???'.\n try:\n vetted_tolerance = \"{:.1e}\".format(self.tolerance)\n except ValueError:\n vetted_tolerance = \"???\"\n\n if sys.version_info[0] == 2:\n return \"{} +- {}\".format(self.expected, vetted_tolerance)\n else:\n return u\"{} \\u00b1 {}\".format(self.expected, vetted_tolerance)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.__eq___ApproxScalar.__hash__.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.__eq___ApproxScalar.__hash__.None", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 258, "end_line": 290, "span_ids": ["ApproxScalar.__eq__", "ApproxScalar:7"], "tokens": 306}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxScalar(ApproxBase):\n\n def __eq__(self, actual):\n \"\"\"\n Return true if the given value is equal to the expected value within\n the pre-specified tolerance.\n \"\"\"\n if _is_numpy_array(actual):\n # Call ``__eq__()`` manually to prevent infinite-recursion with\n # numpy<1.13. See #3748.\n return all(self.__eq__(a) for a in actual.flat)\n\n # Short-circuit exact equality.\n if actual == self.expected:\n return True\n\n # Allow the user to control whether NaNs are considered equal to each\n # other or not. The abs() calls are for compatibility with complex\n # numbers.\n if math.isnan(abs(self.expected)):\n return self.nan_ok and math.isnan(abs(actual))\n\n # Infinity shouldn't be approximately equal to anything but itself, but\n # if there's a relative tolerance, it will be infinite and infinity\n # will seem approximately equal to everything. The equal-to-itself\n # case would have been short circuited above, so here we can just\n # return false if the expected value is infinite. 
The abs() call is\n # for compatibility with complex numbers.\n if math.isinf(abs(self.expected)):\n return False\n\n # Return true if the two numbers are within the tolerance.\n return abs(self.expected - actual) <= self.tolerance\n\n __hash__ = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.tolerance_approx": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_ApproxScalar.tolerance_approx", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 292, "end_line": 536, "span_ids": ["ApproxScalar.tolerance", "approx", "ApproxDecimal"], "tokens": 461}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ApproxScalar(ApproxBase):\n\n @property\n def tolerance(self):\n \"\"\"\n Return the tolerance for the comparison. This could be either an\n absolute tolerance or a relative tolerance, depending on what the user\n specified or which would be larger.\n \"\"\"\n\n def set_default(x, default):\n return x if x is not None else default\n\n # Figure out what the absolute tolerance should be. ``self.abs`` is\n # either None or a value specified by the user.\n absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)\n\n if absolute_tolerance < 0:\n raise ValueError(\n \"absolute tolerance can't be negative: {}\".format(absolute_tolerance)\n )\n if math.isnan(absolute_tolerance):\n raise ValueError(\"absolute tolerance can't be NaN.\")\n\n # If the user specified an absolute tolerance but not a relative one,\n # just return the absolute tolerance.\n if self.rel is None:\n if self.abs is not None:\n return absolute_tolerance\n\n # Figure out what the relative tolerance should be. ``self.rel`` is\n # either None or a value specified by the user. This is done after\n # we've made sure the user didn't ask for an absolute tolerance only,\n # because we don't want to raise errors about the relative tolerance if\n # we aren't even going to use it.\n relative_tolerance = set_default(\n self.rel, self.DEFAULT_RELATIVE_TOLERANCE\n ) * abs(self.expected)\n\n if relative_tolerance < 0:\n raise ValueError(\n \"relative tolerance can't be negative: {}\".format(absolute_tolerance)\n )\n if math.isnan(relative_tolerance):\n raise ValueError(\"relative tolerance can't be NaN.\")\n\n # Return the larger of the relative and absolute tolerances.\n return max(relative_tolerance, absolute_tolerance)\n\n\nclass ApproxDecimal(ApproxScalar):\n \"\"\"\n Perform approximate comparisons where the expected value is a decimal.\n \"\"\"\n\n DEFAULT_ABSOLUTE_TOLERANCE = Decimal(\"1e-12\")\n DEFAULT_RELATIVE_TOLERANCE = Decimal(\"1e-6\")\n\n\ndef approx(expected, rel=None, abs=None, nan_ok=False):\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx.__approx._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx.__approx._", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 350, "end_line": 500, "span_ids": ["approx"], "tokens": 1903}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def approx(expected, rel=None, abs=None, nan_ok=False):\n \"\"\"\n Assert that two numbers (or two sets of numbers) are equal to each other\n within some tolerance.\n\n Due to the `intricacies of floating-point arithmetic`__, numbers that we\n would intuitively expect to be equal are not always so::\n\n >>> 0.1 + 0.2 == 0.3\n False\n\n __ https://docs.python.org/3/tutorial/floatingpoint.html\n\n This problem is commonly encountered when writing tests, e.g. when making\n sure that floating-point values are what you expect them to be. One way to\n deal with this problem is to assert that two floating-point numbers are\n equal to within some appropriate tolerance::\n\n >>> abs((0.1 + 0.2) - 0.3) < 1e-6\n True\n\n However, comparisons like this are tedious to write and difficult to\n understand. Furthermore, absolute comparisons like the one above are\n usually discouraged because there's no tolerance that works well for all\n situations. ``1e-6`` is good for numbers around ``1``, but too small for\n very big numbers and too big for very small ones. It's better to express\n the tolerance as a fraction of the expected value, but relative comparisons\n like that are even more difficult to write correctly and concisely.\n\n The ``approx`` class performs floating-point comparisons using a syntax\n that's as intuitive as possible::\n\n >>> from pytest import approx\n >>> 0.1 + 0.2 == approx(0.3)\n True\n\n The same syntax also works for sequences of numbers::\n\n >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))\n True\n\n Dictionary *values*::\n\n >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})\n True\n\n ``numpy`` arrays::\n\n >>> import numpy as np # doctest: +SKIP\n >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP\n True\n\n And for a ``numpy`` array against a scalar::\n\n >>> import numpy as np # doctest: +SKIP\n >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP\n True\n\n By default, ``approx`` considers numbers within a relative tolerance of\n ``1e-6`` (i.e. one part in a million) of its expected value to be equal.\n This treatment would lead to surprising results if the expected value was\n ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.\n To handle this case less surprisingly, ``approx`` also considers numbers\n within an absolute tolerance of ``1e-12`` of its expected value to be\n equal. Infinity and NaN are special cases. Infinity is only considered\n equal to itself, regardless of the relative tolerance. 
NaN is not\n considered equal to anything by default, but you can make it be equal to\n itself by setting the ``nan_ok`` argument to True. (This is meant to\n facilitate comparing arrays that use NaN to mean \"no data\".)\n\n Both the relative and absolute tolerances can be changed by passing\n arguments to the ``approx`` constructor::\n\n >>> 1.0001 == approx(1)\n False\n >>> 1.0001 == approx(1, rel=1e-3)\n True\n >>> 1.0001 == approx(1, abs=1e-3)\n True\n\n If you specify ``abs`` but not ``rel``, the comparison will not consider\n the relative tolerance at all. In other words, two numbers that are within\n the default relative tolerance of ``1e-6`` will still be considered unequal\n if they exceed the specified absolute tolerance. If you specify both\n ``abs`` and ``rel``, the numbers will be considered equal if either\n tolerance is met::\n\n >>> 1 + 1e-8 == approx(1)\n True\n >>> 1 + 1e-8 == approx(1, abs=1e-12)\n False\n >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)\n True\n\n If you're thinking about using ``approx``, then you might want to know how\n it compares to other good ways of comparing floating-point numbers. All of\n these algorithms are based on relative and absolute tolerances and should\n agree for the most part, but they do have meaningful differences:\n\n - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative\n tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute\n tolerance is met. Because the relative tolerance is calculated w.r.t.\n both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor\n ``b`` is a \"reference value\"). You have to specify an absolute tolerance\n if you want to compare to ``0.0`` because there is no tolerance by\n default. Only available in python>=3.5. `More information...`__\n\n __ https://docs.python.org/3/library/math.html#math.isclose\n\n - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference\n between ``a`` and ``b`` is less that the sum of the relative tolerance\n w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance\n is only calculated w.r.t. ``b``, this test is asymmetric and you can\n think of ``b`` as the reference value. Support for comparing sequences\n is provided by ``numpy.allclose``. `More information...`__\n\n __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html\n\n - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``\n are within an absolute tolerance of ``1e-7``. No relative tolerance is\n considered and the absolute tolerance cannot be changed, so this function\n is not appropriate for very large or very small numbers. Also, it's only\n available in subclasses of ``unittest.TestCase`` and it's ugly because it\n doesn't follow PEP8. `More information...`__\n\n __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual\n\n - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative\n tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.\n Because the relative tolerance is only calculated w.r.t. ``b``, this test\n is asymmetric and you can think of ``b`` as the reference value. In the\n special case that you explicitly specify an absolute tolerance but not a\n relative tolerance, only the absolute tolerance is considered.\n\n .. warning::\n\n .. 
versionchanged:: 3.2\n\n In order to avoid inconsistent behavior, ``TypeError`` is\n raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.\n The example below illustrates the problem::\n\n assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)\n assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)\n\n In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``\n to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to\n comparison. This is because the call hierarchy of rich comparisons\n follows a fixed behavior. `More information...`__\n\n __ https://docs.python.org/3/reference/datamodel.html#object.__ge__\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx._Delegate_the_comparison__is_numpy_array.return.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_approx._Delegate_the_comparison__is_numpy_array.return.False", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 502, "end_line": 549, "span_ids": ["_is_numpy_array", "approx"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def approx(expected, rel=None, abs=None, nan_ok=False):\n\n # Delegate the comparison to a class that knows how to deal with the type\n # of the expected value (e.g. int, float, list, dict, numpy.array, etc).\n #\n # The primary responsibility of these classes is to implement ``__eq__()``\n # and ``__repr__()``. The former is used to actually check if some\n # \"actual\" value is equivalent to the given expected value within the\n # allowed tolerance. The latter is used to show the user the expected\n # value and tolerance, in the case that a test failed.\n #\n # The actual logic for making approximate comparisons can be found in\n # ApproxScalar, which is used to compare individual numbers. All of the\n # other Approx classes eventually delegate to this class. The ApproxBase\n # class provides some convenient methods and overloads, but isn't really\n # essential.\n\n __tracebackhide__ = True\n\n if isinstance(expected, Decimal):\n cls = ApproxDecimal\n elif isinstance(expected, Number):\n cls = ApproxScalar\n elif isinstance(expected, Mapping):\n cls = ApproxMapping\n elif _is_numpy_array(expected):\n cls = ApproxNumpy\n elif (\n isinstance(expected, Iterable)\n and isinstance(expected, Sized)\n and not isinstance(expected, STRING_TYPES)\n ):\n cls = ApproxSequencelike\n else:\n raise _non_numeric_type_error(expected, at=None)\n\n return cls(expected, rel, abs, nan_ok)\n\n\ndef _is_numpy_array(obj):\n \"\"\"\n Return true if the given object is a numpy array. 
Make a special effort to\n avoid importing numpy unless it's really necessary.\n \"\"\"\n import sys\n\n np = sys.modules.get(\"numpy\")\n if np is not None:\n return isinstance(obj, np.ndarray)\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_raises_h_raises.r_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py__builtin_pytest_raises_h_raises.r_", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 553, "end_line": 665, "span_ids": ["_is_numpy_array", "raises"], "tokens": 932}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# builtin pytest.raises helper\n\n\ndef raises(expected_exception, *args, **kwargs):\n r\"\"\"\n Assert that a code block/function call raises ``expected_exception``\n or raise a failure exception otherwise.\n\n :kwparam match: if specified, asserts that the exception matches a text or regex\n\n :kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message\n if the exception is not raised\n\n .. currentmodule:: _pytest._code\n\n Use ``pytest.raises`` as a context manager, which will capture the exception of the given\n type::\n\n >>> with raises(ZeroDivisionError):\n ... 1/0\n\n If the code block does not raise the expected exception (``ZeroDivisionError`` in the example\n above), or no exception at all, the check will fail instead.\n\n You can also use the keyword argument ``match`` to assert that the\n exception matches a text or regex::\n\n >>> with raises(ValueError, match='must be 0 or None'):\n ... raise ValueError(\"value must be 0 or None\")\n\n >>> with raises(ValueError, match=r'must be \\d+$'):\n ... raise ValueError(\"value must be 42\")\n\n The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the\n details of the captured exception::\n\n >>> with raises(ValueError) as exc_info:\n ... raise ValueError(\"value must be 42\")\n >>> assert exc_info.type is ValueError\n >>> assert exc_info.value.args[0] == \"value must be 42\"\n\n .. deprecated:: 4.1\n\n In the context manager form you may use the keyword argument\n ``message`` to specify a custom failure message that will be displayed\n in case the ``pytest.raises`` check fails. This has been deprecated as it\n is considered error prone as users often mean to use ``match`` instead.\n\n .. note::\n\n When using ``pytest.raises`` as a context manager, it's worthwhile to\n note that normal context manager rules apply and that the exception\n raised *must* be the final line in the scope of the context manager.\n Lines of code after that, within the scope of the context manager will\n not be executed. For example::\n\n >>> value = 15\n >>> with raises(ValueError) as exc_info:\n ... if value > 10:\n ... raise ValueError(\"value must be <= 10\")\n ... 
assert exc_info.type is ValueError # this will not execute\n\n Instead, the following approach must be taken (note the difference in\n scope)::\n\n >>> with raises(ValueError) as exc_info:\n ... if value > 10:\n ... raise ValueError(\"value must be <= 10\")\n ...\n >>> assert exc_info.type is ValueError\n\n **Using with** ``pytest.mark.parametrize``\n\n When using :ref:`pytest.mark.parametrize ref`\n it is possible to parametrize tests such that\n some runs raise an exception and others do not.\n\n See :ref:`parametrizing_conditional_raising` for an example.\n\n **Legacy form**\n\n It is possible to specify a callable by passing a to-be-called lambda::\n\n >>> raises(ZeroDivisionError, lambda: 1/0)\n \n\n or you can specify an arbitrary callable with arguments::\n\n >>> def f(x): return 1/x\n ...\n >>> raises(ZeroDivisionError, f, 0)\n \n >>> raises(ZeroDivisionError, f, x=0)\n \n\n The form above is fully supported but discouraged for new code because the\n context manager form is regarded as more readable and less error-prone.\n\n .. note::\n Similar to caught exception objects in Python, explicitly clearing\n local references to returned ``ExceptionInfo`` objects can\n help the Python interpreter speed up its garbage collection.\n\n Clearing those references breaks a reference cycle\n (``ExceptionInfo`` --> caught exception --> frame stack raising\n the exception --> current frame stack --> local variables -->\n ``ExceptionInfo``) which makes Python keep all objects referenced\n from that cycle (including all local variables in the current\n frame) alive until the next cyclic garbage collection run. See the\n official Python ``try`` statement documentation for more detailed\n information.\n\n \"\"\"\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.__tracebackhide___raises.fail_message_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.__tracebackhide___raises.fail_message_", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 666, "end_line": 709, "span_ids": ["raises"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def raises(expected_exception, *args, **kwargs):\n __tracebackhide__ = True\n for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)):\n msg = (\n \"exceptions must be old-style classes or\"\n \" derived from BaseException, not %s\"\n )\n raise TypeError(msg % type(exc))\n\n message = \"DID NOT RAISE {}\".format(expected_exception)\n match_expr = None\n\n if not args:\n if \"message\" in kwargs:\n message = kwargs.pop(\"message\")\n warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2)\n if \"match\" in kwargs:\n match_expr = kwargs.pop(\"match\")\n if kwargs:\n msg = \"Unexpected keyword arguments passed to pytest.raises: \"\n msg += \", \".join(sorted(kwargs))\n raise TypeError(msg)\n return RaisesContext(expected_exception, 
message, match_expr)\n elif isinstance(args[0], str):\n warnings.warn(deprecated.RAISES_EXEC, stacklevel=2)\n code, = args\n assert isinstance(code, str)\n frame = sys._getframe(1)\n loc = frame.f_locals.copy()\n loc.update(kwargs)\n # print \"raises frame scope: %r\" % frame.f_locals\n try:\n code = _pytest._code.Source(code).compile(_genframe=frame)\n six.exec_(code, frame.f_globals, loc)\n # XXX didn't mean f_globals == f_locals something special?\n # this is destroyed here ...\n except expected_exception:\n return _pytest._code.ExceptionInfo.from_current()\n else:\n func = args[0]\n try:\n func(*args[1:], **kwargs)\n except expected_exception:\n return _pytest._code.ExceptionInfo.from_current()\n fail(message)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.Exception_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/python_api.py_raises.Exception_", "embedding": null, "metadata": {"file_path": "src/_pytest/python_api.py", "file_name": "python_api.py", "file_type": "text/x-python", "category": "implementation", "start_line": 718, "end_line": 743, "span_ids": ["RaisesContext", "RaisesContext.__exit__", "RaisesContext.__enter__", "impl:3"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "raises.Exception = fail.Exception\n\n\nclass RaisesContext(object):\n def __init__(self, expected_exception, message, match_expr):\n self.expected_exception = expected_exception\n self.message = message\n self.match_expr = match_expr\n self.excinfo = None\n\n def __enter__(self):\n self.excinfo = _pytest._code.ExceptionInfo.for_later()\n return self.excinfo\n\n def __exit__(self, *tp):\n __tracebackhide__ = True\n if tp[0] is None:\n fail(self.message)\n self.excinfo.__init__(tp)\n suppress_exception = issubclass(self.excinfo.type, self.expected_exception)\n if sys.version_info[0] == 2 and suppress_exception:\n sys.exc_clear()\n if self.match_expr is not None and suppress_exception:\n self.excinfo.match(self.match_expr)\n return suppress_exception", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py__recording_warnings_du_recwarn.with_wrec_.yield_wrec": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py__recording_warnings_du_recwarn.with_wrec_.yield_wrec", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["imports", "recwarn", "docstring"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"\"\"\" recording warnings during test function execution. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport re\nimport sys\nimport warnings\n\nimport six\n\nimport _pytest._code\nfrom _pytest.deprecated import PYTEST_WARNS_UNKNOWN_KWARGS\nfrom _pytest.deprecated import WARNS_EXEC\nfrom _pytest.fixtures import yield_fixture\nfrom _pytest.outcomes import fail\n\n\n@yield_fixture\ndef recwarn():\n \"\"\"Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.\n\n See http://docs.python.org/library/warnings.html for information\n on warning categories.\n \"\"\"\n wrec = WarningsRecorder()\n with wrec:\n warnings.simplefilter(\"default\")\n yield wrec", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_deprecated_call_deprecated_call.return.warns_DeprecationWarning": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_deprecated_call_deprecated_call.return.warns_DeprecationWarning", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 52, "span_ids": ["deprecated_call"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def deprecated_call(func=None, *args, **kwargs):\n \"\"\"context manager that can be used to ensure a block of code triggers a\n ``DeprecationWarning`` or ``PendingDeprecationWarning``::\n\n >>> import warnings\n >>> def api_call_v2():\n ... warnings.warn('use v3 of this api', DeprecationWarning)\n ... return 200\n\n >>> with deprecated_call():\n ... 
assert api_call_v2() == 200\n\n ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``,\n in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings\n types above.\n \"\"\"\n __tracebackhide__ = True\n if func is not None:\n args = (func,) + args\n return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_warns_warns.if_not_args_.else_.with_WarningsChecker_expe.return.func_args_1_kwargs_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_warns_warns.if_not_args_.else_.with_WarningsChecker_expe.return.func_args_1_kwargs_", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 55, "end_line": 109, "span_ids": ["warns"], "tokens": 477}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def warns(expected_warning, *args, **kwargs):\n r\"\"\"Assert that code raises a particular class of warning.\n\n Specifically, the parameter ``expected_warning`` can be a warning class or\n sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or\n classes.\n\n This helper produces a list of :class:`warnings.WarningMessage` objects,\n one for each warning raised.\n\n This function can be used as a context manager, or any of the other ways\n ``pytest.raises`` can be used::\n\n >>> with warns(RuntimeWarning):\n ... warnings.warn(\"my warning\", RuntimeWarning)\n\n In the context manager form you may use the keyword argument ``match`` to assert\n that the exception matches a text or regex::\n\n >>> with warns(UserWarning, match='must be 0 or None'):\n ... warnings.warn(\"value must be 0 or None\", UserWarning)\n\n >>> with warns(UserWarning, match=r'must be \\d+$'):\n ... warnings.warn(\"value must be 42\", UserWarning)\n\n >>> with warns(UserWarning, match=r'must be \\d+$'):\n ... warnings.warn(\"this is not here\", UserWarning)\n Traceback (most recent call last):\n ...\n Failed: DID NOT WARN. No warnings of type ...UserWarning... 
was emitted...\n\n \"\"\"\n __tracebackhide__ = True\n if not args:\n match_expr = kwargs.pop(\"match\", None)\n if kwargs:\n warnings.warn(\n PYTEST_WARNS_UNKNOWN_KWARGS.format(args=sorted(kwargs)), stacklevel=2\n )\n return WarningsChecker(expected_warning, match_expr=match_expr)\n elif isinstance(args[0], str):\n warnings.warn(WARNS_EXEC, stacklevel=2)\n code, = args\n assert isinstance(code, str)\n frame = sys._getframe(1)\n loc = frame.f_locals.copy()\n loc.update(kwargs)\n\n with WarningsChecker(expected_warning):\n code = _pytest._code.Source(code).compile()\n six.exec_(code, frame.f_globals, loc)\n else:\n func = args[0]\n with WarningsChecker(expected_warning):\n return func(*args[1:], **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder_WarningsRecorder.clear.self__list_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder_WarningsRecorder.clear.self__list_", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 112, "end_line": 150, "span_ids": ["WarningsRecorder.__getitem__", "WarningsRecorder", "WarningsRecorder.clear", "WarningsRecorder.__len__", "WarningsRecorder.list", "WarningsRecorder.__iter__", "WarningsRecorder.pop"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WarningsRecorder(warnings.catch_warnings):\n \"\"\"A context manager to record raised warnings.\n\n Adapted from `warnings.catch_warnings`.\n \"\"\"\n\n def __init__(self):\n super(WarningsRecorder, self).__init__(record=True)\n self._entered = False\n self._list = []\n\n @property\n def list(self):\n \"\"\"The list of recorded warnings.\"\"\"\n return self._list\n\n def __getitem__(self, i):\n \"\"\"Get a recorded warning by index.\"\"\"\n return self._list[i]\n\n def __iter__(self):\n \"\"\"Iterate through the recorded warnings.\"\"\"\n return iter(self._list)\n\n def __len__(self):\n \"\"\"The number of recorded warnings.\"\"\"\n return len(self._list)\n\n def pop(self, cls=Warning):\n \"\"\"Pop the first recorded warning, raise exception if not exists.\"\"\"\n for i, w in enumerate(self._list):\n if issubclass(w.category, cls):\n return self._list.pop(i)\n __tracebackhide__ = True\n raise AssertionError(\"%r not found in warning list\" % cls)\n\n def clear(self):\n \"\"\"Clear the list of recorded warnings.\"\"\"\n self._list[:] = []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__enter___WarningsRecorder.__enter__.return.self": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__enter___WarningsRecorder.__enter__.return.self", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 152, "end_line": 187, "span_ids": ["WarningsRecorder.__enter__"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WarningsRecorder(warnings.catch_warnings):\n\n def __enter__(self):\n if self._entered:\n __tracebackhide__ = True\n raise RuntimeError(\"Cannot enter %r twice\" % self)\n self._list = super(WarningsRecorder, self).__enter__()\n warnings.simplefilter(\"always\")\n # python3 keeps track of a \"filter version\", when the filters are\n # updated previously seen warnings can be re-warned. python2 has no\n # concept of this so we must reset the warnings registry manually.\n # trivial patching of `warnings.warn` seems to be enough somehow?\n if six.PY2:\n\n def warn(message, category=None, stacklevel=1):\n # duplicate the stdlib logic due to\n # bad handing in the c version of warnings\n if isinstance(message, Warning):\n category = message.__class__\n # Check category argument\n if category is None:\n category = UserWarning\n assert issubclass(category, Warning)\n\n # emulate resetting the warn registry\n f_globals = sys._getframe(stacklevel).f_globals\n if \"__warningregistry__\" in f_globals:\n orig = f_globals[\"__warningregistry__\"]\n f_globals[\"__warningregistry__\"] = None\n try:\n return self._saved_warn(message, category, stacklevel + 1)\n finally:\n f_globals[\"__warningregistry__\"] = orig\n else:\n return self._saved_warn(message, category, stacklevel + 1)\n\n warnings.warn, self._saved_warn = warn, warnings.warn\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__exit___WarningsRecorder.__exit__.self._entered.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsRecorder.__exit___WarningsRecorder.__exit__.self._entered.False", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 189, "end_line": 200, "span_ids": ["WarningsRecorder.__exit__"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WarningsRecorder(warnings.catch_warnings):\n\n def __exit__(self, *exc_info):\n if not self._entered:\n __tracebackhide__ = True\n raise RuntimeError(\"Cannot exit %r without entering first\" % self)\n # see above where `self._saved_warn` is assigned\n if six.PY2:\n warnings.warn = self._saved_warn\n super(WarningsRecorder, self).__exit__(*exc_info)\n\n # Built-in catch_warnings does not reset entered state so we do it\n # manually here for this context manager to become reusable.\n self._entered = False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker_WarningsChecker.__init__.self.match_expr.match_expr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker_WarningsChecker.__init__.self.match_expr.match_expr", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 203, "end_line": 218, "span_ids": ["WarningsChecker"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WarningsChecker(WarningsRecorder):\n def __init__(self, expected_warning=None, match_expr=None):\n super(WarningsChecker, self).__init__()\n\n msg = \"exceptions must be old-style classes or derived from Warning, not %s\"\n if isinstance(expected_warning, tuple):\n for exc in expected_warning:\n if not inspect.isclass(exc):\n raise TypeError(msg % type(exc))\n elif inspect.isclass(expected_warning):\n expected_warning = (expected_warning,)\n elif expected_warning is not None:\n raise TypeError(msg % type(expected_warning))\n\n self.expected_warning = expected_warning\n self.match_expr = match_expr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker.__exit___": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/recwarn.py_WarningsChecker.__exit___", "embedding": null, "metadata": {"file_path": "src/_pytest/recwarn.py", "file_name": "recwarn.py", "file_type": "text/x-python", "category": "implementation", "start_line": 220, "end_line": 251, "span_ids": ["WarningsChecker.__exit__"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class WarningsChecker(WarningsRecorder):\n\n def __exit__(self, *exc_info):\n super(WarningsChecker, self).__exit__(*exc_info)\n\n __tracebackhide__ = True\n\n # only check if we're not currently handling an exception\n if all(a is None for a in exc_info):\n if self.expected_warning is not None:\n if not any(issubclass(r.category, self.expected_warning) for r in self):\n __tracebackhide__ = True\n fail(\n \"DID NOT WARN. No warnings of type {} was emitted. \"\n \"The list of emitted warnings is: {}.\".format(\n self.expected_warning, [each.message for each in self]\n )\n )\n elif self.match_expr is not None:\n for r in self:\n if issubclass(r.category, self.expected_warning):\n if re.compile(self.match_expr).search(str(r.message)):\n break\n else:\n fail(\n \"DID NOT WARN. No warnings of type {} matching\"\n \" ('{}') was emitted. 
The list of emitted warnings\"\n \" is: {}.\".format(\n self.expected_warning,\n self.match_expr,\n [each.message for each in self],\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_from_pprint_import_pprint_getslaveinfoline.try_.except_AttributeError_.return.s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_from_pprint_import_pprint_getslaveinfoline.try_.except_AttributeError_.return.s", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 31, "span_ids": ["imports", "getslaveinfoline"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from pprint import pprint\n\nimport py\nimport six\n\nfrom _pytest._code.code import ExceptionInfo\nfrom _pytest._code.code import ReprEntry\nfrom _pytest._code.code import ReprEntryNative\nfrom _pytest._code.code import ReprExceptionInfo\nfrom _pytest._code.code import ReprFileLocation\nfrom _pytest._code.code import ReprFuncArgs\nfrom _pytest._code.code import ReprLocals\nfrom _pytest._code.code import ReprTraceback\nfrom _pytest._code.code import TerminalRepr\nfrom _pytest.outcomes import skip\nfrom _pytest.pathlib import Path\n\n\ndef getslaveinfoline(node):\n try:\n return node._slaveinfocache\n except AttributeError:\n d = node.slaveinfo\n ver = \"%s.%s.%s\" % d[\"version_info\"][:3]\n node._slaveinfocache = s = \"[%s] %s -- Python %s %s\" % (\n d[\"id\"],\n d[\"sysplatform\"],\n ver,\n d[\"executable\"],\n )\n return s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport_BaseReport.count_towards_summary.return.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport_BaseReport.count_towards_summary.return.True", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 127, "span_ids": ["BaseReport", "BaseReport.capstdout", "BaseReport.count_towards_summary", "BaseReport.caplog", "BaseReport.capstderr", "BaseReport.fspath", "BaseReport:6", "BaseReport.toterminal", "BaseReport.longreprtext", "BaseReport.get_sections"], "tokens": 567}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseReport(object):\n when = None\n location = None\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n def toterminal(self, out):\n if hasattr(self, \"node\"):\n out.line(getslaveinfoline(self.node))\n\n longrepr = self.longrepr\n if longrepr 
is None:\n            return\n\n        if hasattr(longrepr, \"toterminal\"):\n            longrepr.toterminal(out)\n        else:\n            try:\n                out.line(longrepr)\n            except UnicodeEncodeError:\n                out.line(\"<unprintable longrepr>\")\n\n    def get_sections(self, prefix):\n        for name, content in self.sections:\n            if name.startswith(prefix):\n                yield prefix, content\n\n    @property\n    def longreprtext(self):\n        \"\"\"\n        Read-only property that returns the full string representation\n        of ``longrepr``.\n\n        .. versionadded:: 3.0\n        \"\"\"\n        tw = py.io.TerminalWriter(stringio=True)\n        tw.hasmarkup = False\n        self.toterminal(tw)\n        exc = tw.stringio.getvalue()\n        return exc.strip()\n\n    @property\n    def caplog(self):\n        \"\"\"Return captured log lines, if log capturing is enabled\n\n        .. versionadded:: 3.5\n        \"\"\"\n        return \"\\n\".join(\n            content for (prefix, content) in self.get_sections(\"Captured log\")\n        )\n\n    @property\n    def capstdout(self):\n        \"\"\"Return captured text from stdout, if capturing is enabled\n\n        .. versionadded:: 3.0\n        \"\"\"\n        return \"\".join(\n            content for (prefix, content) in self.get_sections(\"Captured stdout\")\n        )\n\n    @property\n    def capstderr(self):\n        \"\"\"Return captured text from stderr, if capturing is enabled\n\n        .. versionadded:: 3.0\n        \"\"\"\n        return \"\".join(\n            content for (prefix, content) in self.get_sections(\"Captured stderr\")\n        )\n\n    passed = property(lambda x: x.outcome == \"passed\")\n    failed = property(lambda x: x.outcome == \"failed\")\n    skipped = property(lambda x: x.outcome == \"skipped\")\n\n    @property\n    def fspath(self):\n        return self.nodeid.split(\"::\")[0]\n\n    @property\n    def count_towards_summary(self):\n        \"\"\"\n        **Experimental**\n\n        Returns True if this report should be counted towards the totals shown at the end of the\n        test session: \"1 passed, 1 failure, etc\".\n\n        .. note::\n\n            This function is considered **experimental**, so beware that it is subject to changes\n            even in patch releases.\n        \"\"\"\n        return True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport.head_line_BaseReport._get_verbose_word.return.verbose": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport.head_line_BaseReport._get_verbose_word.return.verbose", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 129, "end_line": 155, "span_ids": ["BaseReport._get_verbose_word", "BaseReport.head_line"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseReport(object):\n\n    @property\n    def head_line(self):\n        \"\"\"\n        **Experimental**\n\n        Returns the head line shown with longrepr output for this report, more commonly during\n        traceback representation during failures::\n\n            ________ Test.foo ________\n\n\n        In the example above, the head_line is \"Test.foo\".\n\n        .. 
note::\n\n This function is considered **experimental**, so beware that it is subject to changes\n even in patch releases.\n \"\"\"\n if self.location is not None:\n fspath, lineno, domain = self.location\n return domain\n\n def _get_verbose_word(self, config):\n _category, _short, verbose = config.hook.pytest_report_teststatus(\n report=self, config=config\n )\n return verbose", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json_BaseReport._to_json.disassembled_report.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json_BaseReport._to_json.disassembled_report.return._", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 157, "end_line": 188, "span_ids": ["BaseReport._to_json"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseReport(object):\n\n def _to_json(self):\n \"\"\"\n This was originally the serialize_report() function from xdist (ca03269).\n\n Returns the contents of this report as a dict of builtin entries, suitable for\n serialization.\n\n Experimental method.\n \"\"\"\n\n def disassembled_report(rep):\n reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()\n reprcrash = rep.longrepr.reprcrash.__dict__.copy()\n\n new_entries = []\n for entry in reprtraceback[\"reprentries\"]:\n entry_data = {\n \"type\": type(entry).__name__,\n \"data\": entry.__dict__.copy(),\n }\n for key, value in entry_data[\"data\"].items():\n if hasattr(value, \"__dict__\"):\n entry_data[\"data\"][key] = value.__dict__.copy()\n new_entries.append(entry_data)\n\n reprtraceback[\"reprentries\"] = new_entries\n\n return {\n \"reprcrash\": reprcrash,\n \"reprtraceback\": reprtraceback,\n \"sections\": rep.longrepr.sections,\n }\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json.d_BaseReport._to_json.return.d": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._to_json.d_BaseReport._to_json.return.d", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 190, "end_line": 205, "span_ids": ["BaseReport._to_json"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseReport(object):\n\n def _to_json(self):\n # ... 
other code\n\n d = self.__dict__.copy()\n if hasattr(self.longrepr, \"toterminal\"):\n if hasattr(self.longrepr, \"reprtraceback\") and hasattr(\n self.longrepr, \"reprcrash\"\n ):\n d[\"longrepr\"] = disassembled_report(self)\n else:\n d[\"longrepr\"] = six.text_type(self.longrepr)\n else:\n d[\"longrepr\"] = self.longrepr\n for name in d:\n if isinstance(d[name], (py.path.local, Path)):\n d[name] = str(d[name])\n elif name == \"result\":\n d[name] = None # for now\n return d", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._from_json_BaseReport._from_json.return.cls_reportdict_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_BaseReport._from_json_BaseReport._from_json.return.cls_reportdict_", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 207, "end_line": 265, "span_ids": ["BaseReport._from_json"], "tokens": 481}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseReport(object):\n\n @classmethod\n def _from_json(cls, reportdict):\n \"\"\"\n This was originally the serialize_report() function from xdist (ca03269).\n\n Factory method that returns either a TestReport or CollectReport, depending on the calling\n class. 
It's the callers responsibility to know which class to pass here.\n\n Experimental method.\n \"\"\"\n if reportdict[\"longrepr\"]:\n if (\n \"reprcrash\" in reportdict[\"longrepr\"]\n and \"reprtraceback\" in reportdict[\"longrepr\"]\n ):\n\n reprtraceback = reportdict[\"longrepr\"][\"reprtraceback\"]\n reprcrash = reportdict[\"longrepr\"][\"reprcrash\"]\n\n unserialized_entries = []\n reprentry = None\n for entry_data in reprtraceback[\"reprentries\"]:\n data = entry_data[\"data\"]\n entry_type = entry_data[\"type\"]\n if entry_type == \"ReprEntry\":\n reprfuncargs = None\n reprfileloc = None\n reprlocals = None\n if data[\"reprfuncargs\"]:\n reprfuncargs = ReprFuncArgs(**data[\"reprfuncargs\"])\n if data[\"reprfileloc\"]:\n reprfileloc = ReprFileLocation(**data[\"reprfileloc\"])\n if data[\"reprlocals\"]:\n reprlocals = ReprLocals(data[\"reprlocals\"][\"lines\"])\n\n reprentry = ReprEntry(\n lines=data[\"lines\"],\n reprfuncargs=reprfuncargs,\n reprlocals=reprlocals,\n filelocrepr=reprfileloc,\n style=data[\"style\"],\n )\n elif entry_type == \"ReprEntryNative\":\n reprentry = ReprEntryNative(data[\"lines\"])\n else:\n _report_unserialization_failure(entry_type, cls, reportdict)\n unserialized_entries.append(reprentry)\n reprtraceback[\"reprentries\"] = unserialized_entries\n\n exception_info = ReprExceptionInfo(\n reprtraceback=ReprTraceback(**reprtraceback),\n reprcrash=ReprFileLocation(**reprcrash),\n )\n\n for section in reportdict[\"longrepr\"][\"sections\"]:\n exception_info.addsection(*section)\n reportdict[\"longrepr\"] = exception_info\n\n return cls(**reportdict)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py__report_unserialization_failure__report_unserialization_failure.raise_RuntimeError_stream": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py__report_unserialization_failure__report_unserialization_failure.raise_RuntimeError_stream", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 268, "end_line": 277, "span_ids": ["_report_unserialization_failure"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _report_unserialization_failure(type_name, report_class, reportdict):\n url = \"https://github.com/pytest-dev/pytest/issues\"\n stream = py.io.TextIO()\n pprint(\"-\" * 100, stream=stream)\n pprint(\"INTERNALERROR: Unknown entry type returned: %s\" % type_name, stream=stream)\n pprint(\"report_name: %s\" % report_class, stream=stream)\n pprint(reportdict, stream=stream)\n pprint(\"Please report this bug at %s\" % url, stream=stream)\n pprint(\"-\" * 100, stream=stream)\n raise RuntimeError(stream.getvalue())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport_TestReport.__repr__.return._s_r_when_r_outcome_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport_TestReport.__repr__.return._s_r_when_r_outcome_", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 280, "end_line": 342, "span_ids": ["TestReport", "TestReport.__repr__"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReport(BaseReport):\n \"\"\" Basic test report object (also used for setup and teardown calls if\n they fail).\n \"\"\"\n\n __test__ = False\n\n def __init__(\n self,\n nodeid,\n location,\n keywords,\n outcome,\n longrepr,\n when,\n sections=(),\n duration=0,\n user_properties=None,\n **extra\n ):\n #: normalized collection node id\n self.nodeid = nodeid\n\n #: a (filesystempath, lineno, domaininfo) tuple indicating the\n #: actual location of a test item - it might be different from the\n #: collected one e.g. if a method is inherited from a different module.\n self.location = location\n\n #: a name -> value dictionary containing all keywords and\n #: markers associated with a test invocation.\n self.keywords = keywords\n\n #: test outcome, always one of \"passed\", \"failed\", \"skipped\".\n self.outcome = outcome\n\n #: None or a failure representation.\n self.longrepr = longrepr\n\n #: one of 'setup', 'call', 'teardown' to indicate runtest phase.\n self.when = when\n\n #: user properties is a list of tuples (name, value) that holds user\n #: defined properties of the test\n self.user_properties = list(user_properties or [])\n\n #: list of pairs ``(str, str)`` of extra information which needs to\n #: marshallable. 
Used by pytest to add captured text\n #: from ``stdout`` and ``stderr``, but may be used by other plugins\n #: to add arbitrary information to reports.\n self.sections = list(sections)\n\n #: time it took to run just the test\n self.duration = duration\n\n self.__dict__.update(extra)\n\n def __repr__(self):\n return \"<%s %r when=%r outcome=%r>\" % (\n self.__class__.__name__,\n self.nodeid,\n self.when,\n self.outcome,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport.from_item_and_call_TestReport.from_item_and_call.return.cls_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_TestReport.from_item_and_call_TestReport.from_item_and_call.return.cls_", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 344, "end_line": 385, "span_ids": ["TestReport.from_item_and_call"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReport(BaseReport):\n\n @classmethod\n def from_item_and_call(cls, item, call):\n \"\"\"\n Factory method to create and fill a TestReport with standard item and call info.\n \"\"\"\n when = call.when\n duration = call.stop - call.start\n keywords = {x: 1 for x in item.keywords}\n excinfo = call.excinfo\n sections = []\n if not call.excinfo:\n outcome = \"passed\"\n longrepr = None\n else:\n if not isinstance(excinfo, ExceptionInfo):\n outcome = \"failed\"\n longrepr = excinfo\n elif excinfo.errisinstance(skip.Exception):\n outcome = \"skipped\"\n r = excinfo._getreprcrash()\n longrepr = (str(r.path), r.lineno, r.message)\n else:\n outcome = \"failed\"\n if call.when == \"call\":\n longrepr = item.repr_failure(excinfo)\n else: # exception in setup or teardown\n longrepr = item._repr_failure_py(\n excinfo, style=item.config.getoption(\"tbstyle\", \"auto\")\n )\n for rwhen, key, content in item._report_sections:\n sections.append((\"Captured %s %s\" % (key, rwhen), content))\n return cls(\n item.nodeid,\n item.location,\n keywords,\n outcome,\n longrepr,\n when,\n sections,\n duration,\n user_properties=item.user_properties,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectReport_CollectReport.__repr__.return._CollectReport_r_lenres": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectReport_CollectReport.__repr__.return._CollectReport_r_lenres", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 388, "end_line": 408, "span_ids": ["CollectReport.location", "CollectReport.__repr__", "CollectReport"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", 
"start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CollectReport(BaseReport):\n when = \"collect\"\n\n def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):\n self.nodeid = nodeid\n self.outcome = outcome\n self.longrepr = longrepr\n self.result = result or []\n self.sections = list(sections)\n self.__dict__.update(extra)\n\n @property\n def location(self):\n return (self.fspath, None, self.fspath)\n\n def __repr__(self):\n return \"\" % (\n self.nodeid,\n len(self.result),\n self.outcome,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectErrorRepr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/reports.py_CollectErrorRepr_", "embedding": null, "metadata": {"file_path": "src/_pytest/reports.py", "file_name": "reports.py", "file_type": "text/x-python", "category": "implementation", "start_line": 411, "end_line": 435, "span_ids": ["pytest_report_from_serializable", "pytest_report_to_serializable", "CollectErrorRepr", "CollectErrorRepr.toterminal"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class CollectErrorRepr(TerminalRepr):\n def __init__(self, msg):\n self.longrepr = msg\n\n def toterminal(self, out):\n out.line(self.longrepr, red=True)\n\n\ndef pytest_report_to_serializable(report):\n if isinstance(report, (TestReport, CollectReport)):\n data = report._to_json()\n data[\"_report_type\"] = report.__class__.__name__\n return data\n\n\ndef pytest_report_from_serializable(data):\n if \"_report_type\" in data:\n if data[\"_report_type\"] == \"TestReport\":\n return TestReport._from_json(data)\n elif data[\"_report_type\"] == \"CollectReport\":\n return CollectReport._from_json(data)\n assert False, \"Unknown report_type unserialize data: {}\".format(\n data[\"_report_type\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py__log_machine_parseable_pytest_addoption.group_addoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py__log_machine_parseable_pytest_addoption.group_addoption_", "embedding": null, "metadata": {"file_path": "src/_pytest/resultlog.py", "file_name": "resultlog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 22, "span_ids": ["imports", "pytest_addoption", "docstring"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" log machine-parseable test session result information in a 
plain\ntext file.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport py\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"terminal reporting\", \"resultlog plugin options\")\n group.addoption(\n \"--resultlog\",\n \"--result-log\",\n action=\"store\",\n metavar=\"path\",\n default=None,\n help=\"DEPRECATED path for machine-readable result log.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_pytest_configure_pytest_unconfigure.if_resultlog_.config_pluginmanager_unre": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_pytest_configure_pytest_unconfigure.if_resultlog_.config_pluginmanager_unre", "embedding": null, "metadata": {"file_path": "src/_pytest/resultlog.py", "file_name": "resultlog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 25, "end_line": 47, "span_ids": ["pytest_unconfigure", "pytest_configure"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n resultlog = config.option.resultlog\n # prevent opening resultlog on slave nodes (xdist)\n if resultlog and not hasattr(config, \"slaveinput\"):\n dirname = os.path.dirname(os.path.abspath(resultlog))\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n logfile = open(resultlog, \"w\", 1) # line buffered\n config._resultlog = ResultLog(config, logfile)\n config.pluginmanager.register(config._resultlog)\n\n from _pytest.deprecated import RESULT_LOG\n from _pytest.warnings import _issue_warning_captured\n\n _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2)\n\n\ndef pytest_unconfigure(config):\n resultlog = getattr(config, \"_resultlog\", None)\n if resultlog:\n resultlog.logfile.close()\n del config._resultlog\n config.pluginmanager.unregister(resultlog)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog_ResultLog.log_outcome.self_write_log_entry_test": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog_ResultLog.log_outcome.self_write_log_entry_test", "embedding": null, "metadata": {"file_path": "src/_pytest/resultlog.py", "file_name": "resultlog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 50, "end_line": 64, "span_ids": ["ResultLog.write_log_entry", "ResultLog", "ResultLog.log_outcome"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ResultLog(object):\n def __init__(self, config, logfile):\n self.config = config\n self.logfile = logfile 
# preferably line buffered\n\n def write_log_entry(self, testpath, lettercode, longrepr):\n print(\"%s %s\" % (lettercode, testpath), file=self.logfile)\n for line in longrepr.splitlines():\n print(\" %s\" % line, file=self.logfile)\n\n def log_outcome(self, report, lettercode, longrepr):\n testpath = getattr(report, \"nodeid\", None)\n if testpath is None:\n testpath = report.fspath\n self.write_log_entry(testpath, lettercode, longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_runtest_logreport_ResultLog.pytest_runtest_logreport.self_log_outcome_report_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_runtest_logreport_ResultLog.pytest_runtest_logreport.self_log_outcome_report_", "embedding": null, "metadata": {"file_path": "src/_pytest/resultlog.py", "file_name": "resultlog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 83, "span_ids": ["ResultLog.pytest_runtest_logreport"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ResultLog(object):\n\n def pytest_runtest_logreport(self, report):\n if report.when != \"call\" and report.passed:\n return\n res = self.config.hook.pytest_report_teststatus(\n report=report, config=self.config\n )\n code = res[1]\n if code == \"x\":\n longrepr = str(report.longrepr)\n elif code == \"X\":\n longrepr = \"\"\n elif report.passed:\n longrepr = \"\"\n elif report.failed:\n longrepr = str(report.longrepr)\n elif report.skipped:\n longrepr = str(report.longrepr[2])\n self.log_outcome(report, code, longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_collectreport_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/resultlog.py_ResultLog.pytest_collectreport_", "embedding": null, "metadata": {"file_path": "src/_pytest/resultlog.py", "file_name": "resultlog.py", "file_type": "text/x-python", "category": "implementation", "start_line": 85, "end_line": 102, "span_ids": ["ResultLog.pytest_internalerror", "ResultLog.pytest_collectreport"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ResultLog(object):\n\n def pytest_collectreport(self, report):\n if not report.passed:\n if report.failed:\n code = \"F\"\n longrepr = str(report.longrepr)\n else:\n assert report.skipped\n code = \"S\"\n longrepr = \"%s:%d: %s\" % report.longrepr\n self.log_outcome(report, code, longrepr)\n\n def pytest_internalerror(self, excrepr):\n reprcrash = getattr(excrepr, \"reprcrash\", None)\n path = getattr(reprcrash, 
\"path\", None)\n if path is None:\n path = \"cwd:%s\" % py.path.local()\n self.write_log_entry(path, \"!\", str(excrepr))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__basic_collect_and_run_pytest_addoption.group_addoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__basic_collect_and_run_pytest_addoption.group_addoption_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "pytest_addoption", "docstring"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" basic collect and runtest protocol implementations \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport bdb\nimport os\nimport sys\nfrom time import time\n\nimport attr\nimport six\n\nfrom .reports import CollectErrorRepr\nfrom .reports import CollectReport\nfrom .reports import TestReport\nfrom _pytest._code.code import ExceptionInfo\nfrom _pytest.outcomes import Exit\nfrom _pytest.outcomes import Skipped\nfrom _pytest.outcomes import TEST_OUTCOME\n\n#\n# pytest plugin hooks\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"terminal reporting\", \"reporting\", after=\"general\")\n group.addoption(\n \"--durations\",\n action=\"store\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"show N slowest setup/test durations (N=0 for all).\",\n ),", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_terminal_summary_pytest_terminal_summary.for_rep_in_dlist_.tr_write_line_02_2fs_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_terminal_summary_pytest_terminal_summary.for_rep_in_dlist_.tr_write_line_02_2fs_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 38, "end_line": 64, "span_ids": ["pytest_terminal_summary"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_terminal_summary(terminalreporter):\n durations = terminalreporter.config.option.durations\n verbose = terminalreporter.config.getvalue(\"verbose\")\n if durations is None:\n return\n tr = terminalreporter\n dlist = []\n for replist in tr.stats.values():\n for rep in replist:\n if hasattr(rep, \"duration\"):\n dlist.append(rep)\n if not dlist:\n return\n dlist.sort(key=lambda x: x.duration)\n dlist.reverse()\n if not durations:\n 
tr.write_sep(\"=\", \"slowest test durations\")\n else:\n tr.write_sep(\"=\", \"slowest %s test durations\" % durations)\n dlist = dlist[:durations]\n\n for rep in dlist:\n if verbose < 2 and rep.duration < 0.005:\n tr.write_line(\"\")\n tr.write_line(\"(0.00 durations hidden. Use -vv to show these durations.)\")\n break\n tr.write_line(\"%02.2fs %-8s %s\" % (rep.duration, rep.when, rep.nodeid))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_sessionstart_runtestprotocol.return.reports": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_sessionstart_runtestprotocol.return.reports", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 67, "end_line": 99, "span_ids": ["pytest_sessionstart", "runtestprotocol", "pytest_runtest_protocol", "pytest_sessionfinish"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_sessionstart(session):\n session._setupstate = SetupState()\n\n\ndef pytest_sessionfinish(session):\n session._setupstate.teardown_all()\n\n\ndef pytest_runtest_protocol(item, nextitem):\n item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)\n runtestprotocol(item, nextitem=nextitem)\n item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)\n return True\n\n\ndef runtestprotocol(item, log=True, nextitem=None):\n hasrequest = hasattr(item, \"_request\")\n if hasrequest and not item._request:\n item._initrequest()\n rep = call_and_report(item, \"setup\", log)\n reports = [rep]\n if rep.passed:\n if item.config.getoption(\"setupshow\", False):\n show_test_item(item)\n if not item.config.getoption(\"setuponly\", False):\n reports.append(call_and_report(item, \"call\", log))\n reports.append(call_and_report(item, \"teardown\", log, nextitem=nextitem))\n # after all teardown hooks have been called\n # want funcargs and request info to go away\n if hasrequest:\n item._request = False\n item.funcargs = None\n return reports", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_show_test_item_pytest_runtest_setup.item_session__setupstate_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_show_test_item_pytest_runtest_setup.item_session__setupstate_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 102, "end_line": 115, "span_ids": ["show_test_item", "pytest_runtest_setup"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def show_test_item(item):\n \"\"\"Show test function, parameters and the fixtures of the test item.\"\"\"\n tw = item.config.get_terminal_writer()\n tw.line()\n tw.write(\" \" * 8)\n tw.write(item._nodeid)\n used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())\n if used_fixtures:\n tw.write(\" (fixtures used: {})\".format(\", \".join(used_fixtures)))\n\n\ndef pytest_runtest_setup(item):\n _update_current_test_var(item, \"setup\")\n item.session._setupstate.prepare(item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_call_pytest_runtest_teardown.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_call_pytest_runtest_teardown.None_2", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 118, "end_line": 137, "span_ids": ["pytest_runtest_teardown", "pytest_runtest_call"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_runtest_call(item):\n _update_current_test_var(item, \"call\")\n sys.last_type, sys.last_value, sys.last_traceback = (None, None, None)\n try:\n item.runtest()\n except Exception:\n # Store trace info to allow postmortem debugging\n type, value, tb = sys.exc_info()\n tb = tb.tb_next # Skip *this* frame\n sys.last_type = type\n sys.last_value = value\n sys.last_traceback = tb\n del type, value, tb # Get rid of these in this frame\n raise\n\n\ndef pytest_runtest_teardown(item, nextitem):\n _update_current_test_var(item, \"teardown\")\n item.session._setupstate.teardown_exact(item, nextitem)\n _update_current_test_var(item, None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__update_current_test_var__update_current_test_var.if_when_.else_.os_environ_pop_var_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py__update_current_test_var__update_current_test_var.if_when_.else_.os_environ_pop_var_name_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 140, "end_line": 153, "span_ids": ["_update_current_test_var"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _update_current_test_var(item, when):\n \"\"\"\n Update PYTEST_CURRENT_TEST to reflect the current item and stage.\n\n If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.\n \"\"\"\n 
var_name = \"PYTEST_CURRENT_TEST\"\n if when:\n value = \"{} ({})\".format(item.nodeid, when)\n # don't allow null bytes on environment variables (see #2644, #2957)\n value = value.replace(\"\\x00\", \"(null)\")\n os.environ[var_name] = value\n else:\n os.environ.pop(var_name)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_report_teststatus_call_runtest_hook.return.CallInfo_from_call_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_report_teststatus_call_runtest_hook.return.CallInfo_from_call_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 156, "end_line": 198, "span_ids": ["call_and_report", "check_interactive_exception", "pytest_report_teststatus", "call_runtest_hook"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_report_teststatus(report):\n if report.when in (\"setup\", \"teardown\"):\n if report.failed:\n # category, shortletter, verbose-word\n return \"error\", \"E\", \"ERROR\"\n elif report.skipped:\n return \"skipped\", \"s\", \"SKIPPED\"\n else:\n return \"\", \"\", \"\"\n\n\n#\n# Implementation\n\n\ndef call_and_report(item, when, log=True, **kwds):\n call = call_runtest_hook(item, when, **kwds)\n hook = item.ihook\n report = hook.pytest_runtest_makereport(item=item, call=call)\n if log:\n hook.pytest_runtest_logreport(report=report)\n if check_interactive_exception(call, report):\n hook.pytest_exception_interact(node=item, call=call, report=report)\n return report\n\n\ndef check_interactive_exception(call, report):\n return call.excinfo and not (\n hasattr(report, \"wasxfail\")\n or call.excinfo.errisinstance(Skipped)\n or call.excinfo.errisinstance(bdb.BdbQuit)\n )\n\n\ndef call_runtest_hook(item, when, **kwds):\n hookname = \"pytest_runtest_\" + when\n ihook = getattr(item.ihook, hookname)\n reraise = (Exit,)\n if not item.config.getoption(\"usepdb\", False):\n reraise += (KeyboardInterrupt,)\n return CallInfo.from_call(\n lambda: ihook(item=item, **kwds), when=when, reraise=reraise\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_CallInfo_CallInfo.__repr__.return._CallInfo_when_when_r_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_CallInfo_CallInfo.__repr__.return._CallInfo_when_when_r_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 201, "end_line": 244, "span_ids": ["CallInfo", "CallInfo.from_call", "CallInfo.__repr__", "CallInfo.result"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s(repr=False)\nclass CallInfo(object):\n \"\"\" Result/Exception info a function invocation. \"\"\"\n\n _result = attr.ib()\n # Optional[ExceptionInfo]\n excinfo = attr.ib()\n start = attr.ib()\n stop = attr.ib()\n when = attr.ib()\n\n @property\n def result(self):\n if self.excinfo is not None:\n raise AttributeError(\"{!r} has no valid result\".format(self))\n return self._result\n\n @classmethod\n def from_call(cls, func, when, reraise=None):\n #: context of invocation: one of \"setup\", \"call\",\n #: \"teardown\", \"memocollect\"\n start = time()\n excinfo = None\n try:\n result = func()\n except: # noqa\n excinfo = ExceptionInfo.from_current()\n if reraise is not None and excinfo.errisinstance(reraise):\n raise\n result = None\n stop = time()\n return cls(start=start, stop=stop, when=when, result=result, excinfo=excinfo)\n\n def __repr__(self):\n if self.excinfo is not None:\n status = \"exception\"\n value = self.excinfo.value\n else:\n # TODO: investigate unification\n value = repr(self._result)\n status = \"result\"\n return \"\".format(\n when=self.when, value=value, status=status\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_makereport_pytest_make_collect_report.return.rep": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_pytest_runtest_makereport_pytest_make_collect_report.return.rep", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 247, "end_line": 274, "span_ids": ["pytest_make_collect_report", "pytest_runtest_makereport"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_runtest_makereport(item, call):\n return TestReport.from_item_and_call(item, call)\n\n\ndef pytest_make_collect_report(collector):\n call = CallInfo.from_call(lambda: list(collector.collect()), \"collect\")\n longrepr = None\n if not call.excinfo:\n outcome = \"passed\"\n else:\n from _pytest import nose\n\n skip_exceptions = (Skipped,) + nose.get_skip_exceptions()\n if call.excinfo.errisinstance(skip_exceptions):\n outcome = \"skipped\"\n r = collector._repr_failure_py(call.excinfo, \"line\").reprcrash\n longrepr = (str(r.path), r.lineno, r.message)\n else:\n outcome = \"failed\"\n errorinfo = collector.repr_failure(call.excinfo)\n if not hasattr(errorinfo, \"toterminal\"):\n errorinfo = CollectErrorRepr(errorinfo)\n longrepr = errorinfo\n rep = CollectReport(\n collector.nodeid, outcome, longrepr, getattr(call, \"result\", None)\n )\n rep.call = call # see collect_one_node\n return rep", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState_SetupState._teardown_towards.if_exc_.six_reraise_exc_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState_SetupState._teardown_towards.if_exc_.six_reraise_exc_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", "file_type": "text/x-python", "category": "implementation", "start_line": 277, "end_line": 346, "span_ids": ["SetupState._callfinalizers", "SetupState._teardown_towards", "SetupState.teardown_all", "SetupState.addfinalizer", "SetupState.teardown_exact", "SetupState", "SetupState._pop_and_teardown", "SetupState._teardown_with_finalization"], "tokens": 531}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SetupState(object):\n \"\"\" shared state for setting up/tearing down test items or collectors. \"\"\"\n\n def __init__(self):\n self.stack = []\n self._finalizers = {}\n\n def addfinalizer(self, finalizer, colitem):\n \"\"\" attach a finalizer to the given colitem.\n if colitem is None, this will add a finalizer that\n is called at the end of teardown_all().\n \"\"\"\n assert colitem and not isinstance(colitem, tuple)\n assert callable(finalizer)\n # assert colitem in self.stack # some unit tests don't setup stack :/\n self._finalizers.setdefault(colitem, []).append(finalizer)\n\n def _pop_and_teardown(self):\n colitem = self.stack.pop()\n self._teardown_with_finalization(colitem)\n\n def _callfinalizers(self, colitem):\n finalizers = self._finalizers.pop(colitem, None)\n exc = None\n while finalizers:\n fin = finalizers.pop()\n try:\n fin()\n except TEST_OUTCOME:\n # XXX Only first exception will be seen by user,\n # ideally all should be reported.\n if exc is None:\n exc = sys.exc_info()\n if exc:\n six.reraise(*exc)\n\n def _teardown_with_finalization(self, colitem):\n self._callfinalizers(colitem)\n if hasattr(colitem, \"teardown\"):\n colitem.teardown()\n for colitem in self._finalizers:\n assert (\n colitem is None or colitem in self.stack or isinstance(colitem, tuple)\n )\n\n def teardown_all(self):\n while self.stack:\n self._pop_and_teardown()\n for key in list(self._finalizers):\n self._teardown_with_finalization(key)\n assert not self._finalizers\n\n def teardown_exact(self, item, nextitem):\n needed_collectors = nextitem and nextitem.listchain() or []\n self._teardown_towards(needed_collectors)\n\n def _teardown_towards(self, needed_collectors):\n exc = None\n while self.stack:\n if self.stack == needed_collectors[: len(self.stack)]:\n break\n try:\n self._pop_and_teardown()\n except TEST_OUTCOME:\n # XXX Only first exception will be seen by user,\n # ideally all should be reported.\n if exc is None:\n exc = sys.exc_info()\n if exc:\n six.reraise(*exc)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState.prepare_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/runner.py_SetupState.prepare_", "embedding": null, "metadata": {"file_path": "src/_pytest/runner.py", "file_name": "runner.py", 
"file_type": "text/x-python", "category": "implementation", "start_line": 348, "end_line": 375, "span_ids": ["SetupState.prepare", "collect_one_node"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SetupState(object):\n\n def prepare(self, colitem):\n \"\"\" setup objects along the collector chain to the test-method\n and teardown previously setup objects.\"\"\"\n needed_collectors = colitem.listchain()\n self._teardown_towards(needed_collectors)\n\n # check if the last collection node has raised an error\n for col in self.stack:\n if hasattr(col, \"_prepare_exc\"):\n six.reraise(*col._prepare_exc)\n for col in needed_collectors[len(self.stack) :]:\n self.stack.append(col)\n try:\n col.setup()\n except TEST_OUTCOME:\n col._prepare_exc = sys.exc_info()\n raise\n\n\ndef collect_one_node(collector):\n ihook = collector.ihook\n ihook.pytest_collectstart(collector=collector)\n rep = ihook.pytest_make_collect_report(collector=collector)\n call = rep.__dict__.pop(\"call\", None)\n if call and check_interactive_exception(call, rep):\n ihook.pytest_exception_interact(node=collector, call=call, report=rep)\n return rep", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_from___future___import_ab_pytest_addoption.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_from___future___import_ab_pytest_addoption.None_1", "embedding": null, "metadata": {"file_path": "src/_pytest/setuponly.py", "file_name": "setuponly.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports", "pytest_addoption"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"debugconfig\")\n group.addoption(\n \"--setuponly\",\n \"--setup-only\",\n action=\"store_true\",\n help=\"only setup fixtures, do not execute tests.\",\n )\n group.addoption(\n \"--setupshow\",\n \"--setup-show\",\n action=\"store_true\",\n help=\"show setup of fixtures while executing tests.\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_pytest_fixture_setup_pytest_fixture_post_finalizer.if_hasattr_fixturedef_c.if_config_option_setupsho.if_hasattr_fixturedef_c.del_fixturedef_cached_par": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py_pytest_fixture_setup_pytest_fixture_post_finalizer.if_hasattr_fixturedef_c.if_config_option_setupsho.if_hasattr_fixturedef_c.del_fixturedef_cached_par", "embedding": null, "metadata": {"file_path": "src/_pytest/setuponly.py", "file_name": "setuponly.py", "file_type": "text/x-python", "category": "implementation", "start_line": 26, "end_line": 50, "span_ids": ["pytest_fixture_setup", "pytest_fixture_post_finalizer"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(hookwrapper=True)\ndef pytest_fixture_setup(fixturedef, request):\n yield\n config = request.config\n if config.option.setupshow:\n if hasattr(request, \"param\"):\n # Save the fixture parameter so ._show_fixture_action() can\n # display it now and during the teardown (in .finish()).\n if fixturedef.ids:\n if callable(fixturedef.ids):\n fixturedef.cached_param = fixturedef.ids(request.param)\n else:\n fixturedef.cached_param = fixturedef.ids[request.param_index]\n else:\n fixturedef.cached_param = request.param\n _show_fixture_action(fixturedef, \"SETUP\")\n\n\ndef pytest_fixture_post_finalizer(fixturedef):\n if hasattr(fixturedef, \"cached_result\"):\n config = fixturedef._fixturemanager.config\n if config.option.setupshow:\n _show_fixture_action(fixturedef, \"TEARDOWN\")\n if hasattr(fixturedef, \"cached_param\"):\n del fixturedef.cached_param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py__show_fixture_action_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setuponly.py__show_fixture_action_", "embedding": null, "metadata": {"file_path": "src/_pytest/setuponly.py", "file_name": "setuponly.py", "file_type": "text/x-python", "category": "implementation", "start_line": 53, "end_line": 89, "span_ids": ["pytest_cmdline_main", "_show_fixture_action"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _show_fixture_action(fixturedef, msg):\n config = fixturedef._fixturemanager.config\n capman = config.pluginmanager.getplugin(\"capturemanager\")\n if capman:\n capman.suspend_global_capture()\n out, err = capman.read_global_capture()\n\n tw = config.get_terminal_writer()\n tw.line()\n tw.write(\" \" * 2 * fixturedef.scopenum)\n tw.write(\n \"{step} {scope} {fixture}\".format(\n step=msg.ljust(8), # align the output to TEARDOWN\n scope=fixturedef.scope[0].upper(),\n fixture=fixturedef.argname,\n )\n )\n\n if msg == \"SETUP\":\n deps = sorted(arg for arg in fixturedef.argnames if arg != \"request\")\n if deps:\n tw.write(\" (fixtures used: {})\".format(\", \".join(deps)))\n\n if hasattr(fixturedef, \"cached_param\"):\n tw.write(\"[{}]\".format(fixturedef.cached_param))\n\n if capman:\n capman.resume_global_capture()\n sys.stdout.write(out)\n 
sys.stderr.write(err)\n\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_cmdline_main(config):\n if config.option.setuponly:\n config.option.setupshow = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setupplan.py_from___future___import_ab_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/setupplan.py_from___future___import_ab_", "embedding": null, "metadata": {"file_path": "src/_pytest/setupplan.py", "file_name": "setupplan.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 32, "span_ids": ["pytest_cmdline_main", "imports", "pytest_fixture_setup", "pytest_addoption"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"debugconfig\")\n group.addoption(\n \"--setupplan\",\n \"--setup-plan\",\n action=\"store_true\",\n help=\"show what fixtures and tests would be executed but \"\n \"don't execute anything.\",\n )\n\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_fixture_setup(fixturedef, request):\n # Will return a dummy fixture if the setuponly option is provided.\n if request.config.option.setupplan:\n fixturedef.cached_result = (None, None, None)\n return fixturedef.cached_result\n\n\n@pytest.hookimpl(tryfirst=True)\ndef pytest_cmdline_main(config):\n if config.option.setupplan:\n config.option.setuponly = True\n config.option.setupshow = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_configure_pytest_configure.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_configure_pytest_configure.None_2", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 70, "span_ids": ["pytest_configure"], "tokens": 363}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n if config.option.runxfail:\n # yay a hack\n import pytest\n\n old = pytest.xfail\n config._cleanup.append(lambda: setattr(pytest, \"xfail\", old))\n\n def nop(*args, **kwargs):\n pass\n\n nop.Exception = xfail.Exception\n setattr(pytest, \"xfail\", nop)\n\n config.addinivalue_line(\n \"markers\",\n \"skip(reason=None): skip the given test function with an optional reason. 
\"\n 'Example: skip(reason=\"no way of currently testing this\") skips the '\n \"test.\",\n )\n config.addinivalue_line(\n \"markers\",\n \"skipif(condition): skip the given test function if eval(condition) \"\n \"results in a True value. Evaluation happens within the \"\n \"module global context. Example: skipif('sys.platform == \\\"win32\\\"') \"\n \"skips the test if we are on the win32 platform. see \"\n \"https://docs.pytest.org/en/latest/skipping.html\",\n )\n config.addinivalue_line(\n \"markers\",\n \"xfail(condition, reason=None, run=True, raises=None, strict=False): \"\n \"mark the test function as an expected failure if eval(condition) \"\n \"has a True value. Optionally specify a reason for better reporting \"\n \"and run=False if you don't even want to execute the test function. \"\n \"If only specific exception(s) are expected, you can list them in \"\n \"raises, and if the test fails in other ways, it will be reported as \"\n \"a true failure. See https://docs.pytest.org/en/latest/skipping.html\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_setup_pytest_runtest_setup.check_xfail_no_run_item_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_setup_pytest_runtest_setup.check_xfail_no_run_item_", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 73, "end_line": 92, "span_ids": ["pytest_runtest_setup"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookimpl(tryfirst=True)\ndef pytest_runtest_setup(item):\n # Check if skip or skipif are specified as pytest marks\n item._skipped_by_mark = False\n eval_skipif = MarkEvaluator(item, \"skipif\")\n if eval_skipif.istrue():\n item._skipped_by_mark = True\n skip(eval_skipif.getexplanation())\n\n for skip_info in item.iter_markers(name=\"skip\"):\n item._skipped_by_mark = True\n if \"reason\" in skip_info.kwargs:\n skip(skip_info.kwargs[\"reason\"])\n elif skip_info.args:\n skip(skip_info.args[0])\n else:\n skip(\"unconditional skip\")\n\n item._evalxfail = MarkEvaluator(item, \"xfail\")\n check_xfail_no_run(item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_pyfunc_call_check_xfail_no_run.if_not_item_config_option.if_evalxfail_istrue_.if_not_evalxfail_get_run.xfail_NOTRUN_evalx": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_pyfunc_call_check_xfail_no_run.if_not_item_config_option.if_evalxfail_istrue_.if_not_evalxfail_get_run.xfail_NOTRUN_evalx", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 95, "end_line": 110, "span_ids": ["check_xfail_no_run", "pytest_pyfunc_call"], 
"tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookimpl(hookwrapper=True)\ndef pytest_pyfunc_call(pyfuncitem):\n check_xfail_no_run(pyfuncitem)\n outcome = yield\n passed = outcome.excinfo is None\n if passed:\n check_strict_xfail(pyfuncitem)\n\n\ndef check_xfail_no_run(item):\n \"\"\"check xfail(run=False)\"\"\"\n if not item.config.option.runxfail:\n evalxfail = item._evalxfail\n if evalxfail.istrue():\n if not evalxfail.get(\"run\", True):\n xfail(\"[NOTRUN] \" + evalxfail.getexplanation())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_check_strict_xfail_check_strict_xfail.if_evalxfail_istrue_.if_is_strict_xfail_.fail_XPASS_strict_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_check_strict_xfail_check_strict_xfail.if_evalxfail_istrue_.if_is_strict_xfail_.fail_XPASS_strict_", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 113, "end_line": 122, "span_ids": ["check_strict_xfail"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def check_strict_xfail(pyfuncitem):\n \"\"\"check xfail(strict=True) for the given PASSING test\"\"\"\n evalxfail = pyfuncitem._evalxfail\n if evalxfail.istrue():\n strict_default = pyfuncitem.config.getini(\"xfail_strict\")\n is_strict_xfail = evalxfail.get(\"strict\", strict_default)\n if is_strict_xfail:\n del pyfuncitem._evalxfail\n explanation = evalxfail.getexplanation()\n fail(\"[XPASS(strict)] \" + explanation, pytrace=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_makereport_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py_pytest_runtest_makereport_", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 125, "end_line": 187, "span_ids": ["pytest_report_teststatus", "pytest_runtest_makereport"], "tokens": 583}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@hookimpl(hookwrapper=True)\ndef pytest_runtest_makereport(item, call):\n outcome = yield\n rep = outcome.get_result()\n evalxfail = getattr(item, \"_evalxfail\", None)\n # unitttest special case, 
see setting of _unexpectedsuccess\n if hasattr(item, \"_unexpectedsuccess\") and rep.when == \"call\":\n from _pytest.compat import _is_unittest_unexpected_success_a_failure\n\n if item._unexpectedsuccess:\n rep.longrepr = \"Unexpected success: {}\".format(item._unexpectedsuccess)\n else:\n rep.longrepr = \"Unexpected success\"\n if _is_unittest_unexpected_success_a_failure():\n rep.outcome = \"failed\"\n else:\n rep.outcome = \"passed\"\n rep.wasxfail = rep.longrepr\n elif item.config.option.runxfail:\n pass # don't interefere\n elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):\n rep.wasxfail = \"reason: \" + call.excinfo.value.msg\n rep.outcome = \"skipped\"\n elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():\n if call.excinfo:\n if evalxfail.invalidraise(call.excinfo.value):\n rep.outcome = \"failed\"\n else:\n rep.outcome = \"skipped\"\n rep.wasxfail = evalxfail.getexplanation()\n elif call.when == \"call\":\n strict_default = item.config.getini(\"xfail_strict\")\n is_strict_xfail = evalxfail.get(\"strict\", strict_default)\n explanation = evalxfail.getexplanation()\n if is_strict_xfail:\n rep.outcome = \"failed\"\n rep.longrepr = \"[XPASS(strict)] {}\".format(explanation)\n else:\n rep.outcome = \"passed\"\n rep.wasxfail = explanation\n elif (\n getattr(item, \"_skipped_by_mark\", False)\n and rep.skipped\n and type(rep.longrepr) is tuple\n ):\n # skipped by mark.skipif; change the location of the failure\n # to point to the item definition, otherwise it will display\n # the location of where the skip exception was raised within pytest\n filename, line, reason = rep.longrepr\n filename, line = item.location[:2]\n rep.longrepr = filename, line, reason\n\n\n# called by terminalreporter progress reporting\n\n\ndef pytest_report_teststatus(report):\n if hasattr(report, \"wasxfail\"):\n if report.skipped:\n return \"xfailed\", \"x\", \"XFAIL\"\n elif report.passed:\n return \"xpassed\", \"X\", \"XPASS\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_pytest_pytest_configure.config_pluginmanager_regi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_pytest_pytest_configure.config_pluginmanager_regi", "embedding": null, "metadata": {"file_path": "src/_pytest/stepwise.py", "file_name": "stepwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 23, "span_ids": ["imports", "pytest_addoption", "pytest_configure"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--sw\",\n \"--stepwise\",\n action=\"store_true\",\n dest=\"stepwise\",\n help=\"exit on test failure and continue from last failing test next time\",\n )\n group.addoption(\n \"--stepwise-skip\",\n action=\"store_true\",\n dest=\"stepwise_skip\",\n help=\"ignore the first failing test but stop on the next failing test\",\n )\n\n\n@pytest.hookimpl\ndef pytest_configure(config):\n 
config.pluginmanager.register(StepwisePlugin(config), \"stepwiseplugin\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin_StepwisePlugin.pytest_collectreport.if_self_active_and_report.self.session.shouldstop._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin_StepwisePlugin.pytest_collectreport.if_self_active_and_report.self.session.shouldstop._", "embedding": null, "metadata": {"file_path": "src/_pytest/stepwise.py", "file_name": "stepwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 26, "end_line": 76, "span_ids": ["StepwisePlugin.pytest_collectreport", "StepwisePlugin", "StepwisePlugin.pytest_collection_modifyitems", "StepwisePlugin.pytest_sessionstart"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StepwisePlugin:\n def __init__(self, config):\n self.config = config\n self.active = config.getvalue(\"stepwise\")\n self.session = None\n\n if self.active:\n self.lastfailed = config.cache.get(\"cache/stepwise\", None)\n self.skip = config.getvalue(\"stepwise_skip\")\n\n def pytest_sessionstart(self, session):\n self.session = session\n\n def pytest_collection_modifyitems(self, session, config, items):\n if not self.active:\n return\n if not self.lastfailed:\n self.report_status = \"no previously failed tests, not skipping.\"\n return\n\n already_passed = []\n found = False\n\n # Make a list of all tests that have been run before the last failing one.\n for item in items:\n if item.nodeid == self.lastfailed:\n found = True\n break\n else:\n already_passed.append(item)\n\n # If the previously failed test was not found among the test items,\n # do not skip any tests.\n if not found:\n self.report_status = \"previously failed test not found, not skipping.\"\n already_passed = []\n else:\n self.report_status = \"skipping {} already passed items.\".format(\n len(already_passed)\n )\n\n for item in already_passed:\n items.remove(item)\n\n config.hook.pytest_deselected(items=already_passed)\n\n def pytest_collectreport(self, report):\n if self.active and report.failed:\n self.session.shouldstop = (\n \"Error when collecting test, stopping test execution.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin.pytest_runtest_logreport_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/stepwise.py_StepwisePlugin.pytest_runtest_logreport_", "embedding": null, "metadata": {"file_path": "src/_pytest/stepwise.py", "file_name": "stepwise.py", "file_type": "text/x-python", "category": "implementation", "start_line": 78, "end_line": 115, "span_ids": ["StepwisePlugin.pytest_runtest_logreport", "StepwisePlugin.pytest_sessionfinish", "StepwisePlugin.pytest_report_collectionfinish"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class StepwisePlugin:\n\n def pytest_runtest_logreport(self, report):\n # Skip this hook if plugin is not active or the test is xfailed.\n if not self.active or \"xfail\" in report.keywords:\n return\n\n if report.failed:\n if self.skip:\n # Remove test from the failed ones (if it exists) and unset the skip option\n # to make sure the following tests will not be skipped.\n if report.nodeid == self.lastfailed:\n self.lastfailed = None\n\n self.skip = False\n else:\n # Mark test as the last failing and interrupt the test session.\n self.lastfailed = report.nodeid\n self.session.shouldstop = (\n \"Test failed, continuing from this test next run.\"\n )\n\n else:\n # If the test was actually run and did pass.\n if report.when == \"call\":\n # Remove test from the failed ones, if exists.\n if report.nodeid == self.lastfailed:\n self.lastfailed = None\n\n def pytest_report_collectionfinish(self):\n if self.active and self.config.getoption(\"verbose\") >= 0:\n return \"stepwise: %s\" % self.report_status\n\n def pytest_sessionfinish(self, session):\n if self.active:\n self.config.cache.set(\"cache/stepwise\", self.lastfailed)\n else:\n # Clear the list of failing tests if the plugin is not active.\n self.config.cache.set(\"cache/stepwise\", [])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_MoreQuietAction_MoreQuietAction.__call__.namespace.quiet.getattr_namespace_quiet": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_MoreQuietAction_MoreQuietAction.__call__.namespace.quiet.getattr_namespace_quiet", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 56, "span_ids": ["MoreQuietAction", "MoreQuietAction.__call__"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class MoreQuietAction(argparse.Action):\n \"\"\"\n a modified copy of the argparse count action which counts down and updates\n the legacy quiet attribute at the same time\n\n used to unify verbosity handling\n \"\"\"\n\n def __init__(self, option_strings, dest, default=None, required=False, help=None):\n super(MoreQuietAction, self).__init__(\n option_strings=option_strings,\n dest=dest,\n nargs=0,\n default=default,\n required=required,\n help=help,\n )\n\n def __call__(self, parser, namespace, values, option_string=None):\n new_count = getattr(namespace, self.dest, 0) - 1\n setattr(namespace, self.dest, new_count)\n # todo Deprecate config.quiet\n namespace.quiet = getattr(namespace, \"quiet\", 0) + 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_addoption_pytest_addoption.parser_addini_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_addoption_pytest_addoption.parser_addini_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 59, "end_line": 147, "span_ids": ["pytest_addoption"], "tokens": 632}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"terminal reporting\", \"reporting\", after=\"general\")\n group._addoption(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n dest=\"verbose\",\n help=\"increase verbosity.\",\n ),\n group._addoption(\n \"-q\",\n \"--quiet\",\n action=MoreQuietAction,\n default=0,\n dest=\"verbose\",\n help=\"decrease verbosity.\",\n ),\n group._addoption(\n \"--verbosity\", dest=\"verbose\", type=int, default=0, help=\"set verbosity\"\n )\n group._addoption(\n \"-r\",\n action=\"store\",\n dest=\"reportchars\",\n default=\"\",\n metavar=\"chars\",\n help=\"show extra test summary info as specified by chars: (f)ailed, \"\n \"(E)rror, (s)kipped, (x)failed, (X)passed, \"\n \"(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. \"\n \"Warnings are displayed at all times except when \"\n \"--disable-warnings is set.\",\n )\n group._addoption(\n \"--disable-warnings\",\n \"--disable-pytest-warnings\",\n default=False,\n dest=\"disable_warnings\",\n action=\"store_true\",\n help=\"disable warnings summary\",\n )\n group._addoption(\n \"-l\",\n \"--showlocals\",\n action=\"store_true\",\n dest=\"showlocals\",\n default=False,\n help=\"show locals in tracebacks (disabled by default).\",\n )\n group._addoption(\n \"--tb\",\n metavar=\"style\",\n action=\"store\",\n dest=\"tbstyle\",\n default=\"auto\",\n choices=[\"auto\", \"long\", \"short\", \"no\", \"line\", \"native\"],\n help=\"traceback print mode (auto/long/short/line/native/no).\",\n )\n group._addoption(\n \"--show-capture\",\n action=\"store\",\n dest=\"showcapture\",\n choices=[\"no\", \"stdout\", \"stderr\", \"log\", \"all\"],\n default=\"all\",\n help=\"Controls how captured stdout/stderr/log is shown on failed tests. 
\"\n \"Default is 'all'.\",\n )\n group._addoption(\n \"--fulltrace\",\n \"--full-trace\",\n action=\"store_true\",\n default=False,\n help=\"don't cut any tracebacks (default is to cut).\",\n )\n group._addoption(\n \"--color\",\n metavar=\"color\",\n action=\"store\",\n dest=\"color\",\n default=\"auto\",\n choices=[\"yes\", \"no\", \"auto\"],\n help=\"color terminal output (yes/no/auto).\",\n )\n\n parser.addini(\n \"console_output_style\",\n help='console output: \"classic\", or with additional progress information (\"progress\" (percentage) | \"count\").',\n default=\"progress\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_configure_getreportopt.return.reportopts": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_configure_getreportopt.return.reportopts", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 149, "end_line": 176, "span_ids": ["getreportopt", "pytest_configure"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n reporter = TerminalReporter(config, sys.stdout)\n config.pluginmanager.register(reporter, \"terminalreporter\")\n if config.option.debug or config.option.traceconfig:\n\n def mywriter(tags, args):\n msg = \" \".join(map(str, args))\n reporter.write_line(\"[traceconfig] \" + msg)\n\n config.trace.root.setprocessor(\"pytest:config\", mywriter)\n\n\ndef getreportopt(config):\n reportopts = \"\"\n reportchars = config.option.reportchars\n if not config.option.disable_warnings and \"w\" not in reportchars:\n reportchars += \"w\"\n elif config.option.disable_warnings and \"w\" in reportchars:\n reportchars = reportchars.replace(\"w\", \"\")\n for char in reportchars:\n if char == \"a\":\n reportopts = \"sxXwEf\"\n elif char == \"A\":\n reportopts = \"sxXwEfpP\"\n break\n elif char not in reportopts:\n reportopts += char\n return reportopts", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_report_teststatus_WarningReport.count_towards_summary.True": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_pytest_report_teststatus_WarningReport.count_towards_summary.True", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 180, "end_line": 207, "span_ids": ["pytest_report_teststatus", "WarningReport"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(trylast=True) # after _pytest.runner\ndef pytest_report_teststatus(report):\n if report.passed:\n letter = \".\"\n elif report.skipped:\n letter = \"s\"\n elif report.failed:\n letter = \"F\"\n if report.when != \"call\":\n letter = \"f\"\n return report.outcome, letter, report.outcome.upper()\n\n\n@attr.s\nclass WarningReport(object):\n \"\"\"\n Simple structure to hold warnings information captured by ``pytest_warning_captured``.\n\n :ivar str message: user friendly message about the warning\n :ivar str|None nodeid: node id that generated the warning (see ``get_location``).\n :ivar tuple|py.path.local fslocation:\n file system location of the source of the warning (see ``get_location``).\n \"\"\"\n\n message = attr.ib()\n nodeid = attr.ib(default=None)\n fslocation = attr.ib(default=None)\n count_towards_summary = True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_WarningReport.get_location_WarningReport.get_location.return.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_WarningReport.get_location_WarningReport.get_location.return.None", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 209, "end_line": 225, "span_ids": ["WarningReport.get_location"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass WarningReport(object):\n\n def get_location(self, config):\n \"\"\"\n Returns the more user-friendly information about the location\n of a warning, or None.\n \"\"\"\n if self.nodeid:\n return self.nodeid\n if self.fslocation:\n if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:\n filename, linenum = self.fslocation[:2]\n relpath = py.path.local(filename).relto(config.invocation_dir)\n if not relpath:\n relpath = str(filename)\n return \"%s:%s\" % (relpath, linenum)\n else:\n return str(self.fslocation)\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter_TerminalReporter.__init__.self._collect_report_last_write.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter_TerminalReporter.__init__.self._collect_report_last_write.None", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 228, "end_line": 251, "span_ids": ["TerminalReporter"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n def __init__(self, config, file=None):\n import _pytest.config\n\n self.config = config\n self._numcollected = 0\n self._session = None\n self._showfspath = None\n\n self.stats = {}\n self.startdir = config.invocation_dir\n if file is None:\n file = sys.stdout\n self._tw = _pytest.config.create_terminal_writer(config, file)\n # self.writer will be deprecated in pytest-3.4\n self.writer = self._tw\n self._screen_width = self._tw.fullwidth\n self.currentfspath = None\n self.reportchars = getreportopt(config)\n self.hasmarkup = self._tw.hasmarkup\n self.isatty = file.isatty()\n self._progress_nodeids_reported = set()\n self._show_progress_info = self._determine_show_progress_info()\n self._collect_report_last_write = None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._determine_show_progress_info_TerminalReporter._determine_show_progress_info.return.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._determine_show_progress_info_TerminalReporter._determine_show_progress_info.return.False", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 253, "end_line": 264, "span_ids": ["TerminalReporter._determine_show_progress_info"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _determine_show_progress_info(self):\n \"\"\"Return True if we should display progress information based on the current config\"\"\"\n # do not show progress if we are not capturing output (#3038)\n if self.config.getoption(\"capture\", \"no\") == \"no\":\n return False\n # do not show progress if we are showing fixture setup/teardown\n if self.config.getoption(\"setupshow\", False):\n return False\n cfg = self.config.getini(\"console_output_style\")\n if cfg in (\"progress\", \"count\"):\n return cfg\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.verbosity_TerminalReporter.hasopt.return.char_in_self_reportchars": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.verbosity_TerminalReporter.hasopt.return.char_in_self_reportchars", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 290, "span_ids": ["TerminalReporter.verbosity", "TerminalReporter.showfspath", "TerminalReporter.showlongtestinfo", "TerminalReporter.showfspath_4", "TerminalReporter.showheader", "TerminalReporter.hasopt"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n @property\n def verbosity(self):\n return self.config.option.verbose\n\n @property\n def showheader(self):\n return self.verbosity >= 0\n\n @property\n def showfspath(self):\n if self._showfspath is None:\n return self.verbosity >= 0\n return self._showfspath\n\n @showfspath.setter\n def showfspath(self, value):\n self._showfspath = value\n\n @property\n def showlongtestinfo(self):\n return self.verbosity > 0\n\n def hasopt(self, char):\n char = {\"xfailed\": \"x\", \"skipped\": \"s\"}.get(char, char)\n return char in self.reportchars", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_fspath_result_TerminalReporter.write_fspath_result.self__tw_write_res_mar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_fspath_result_TerminalReporter.write_fspath_result.self__tw_write_res_mar", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 292, "end_line": 303, "span_ids": ["TerminalReporter.write_fspath_result"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def write_fspath_result(self, nodeid, res, **markup):\n fspath = self.config.rootdir.join(nodeid.split(\"::\")[0])\n # NOTE: explicitly check for None to work around py bug, and for less\n # overhead in general (https://github.com/pytest-dev/py/pull/207).\n if self.currentfspath is None or fspath != self.currentfspath:\n if self.currentfspath is not None and self._show_progress_info:\n self._write_progress_information_filling_space()\n self.currentfspath = fspath\n fspath = self.startdir.bestrelpath(fspath)\n self._tw.line()\n self._tw.write(fspath + \" \")\n self._tw.write(res, **markup)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_ensure_prefix_TerminalReporter.write_line.self__tw_line_line_mar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_ensure_prefix_TerminalReporter.write_line.self__tw_line_line_mar", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 305, "end_line": 326, "span_ids": ["TerminalReporter.write_line", "TerminalReporter.write", "TerminalReporter.ensure_newline", "TerminalReporter.write_ensure_prefix"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def write_ensure_prefix(self, prefix, extra=\"\", **kwargs):\n if self.currentfspath != prefix:\n self._tw.line()\n self.currentfspath = prefix\n self._tw.write(prefix)\n if extra:\n self._tw.write(extra, **kwargs)\n self.currentfspath = -2\n\n def ensure_newline(self):\n if self.currentfspath:\n self._tw.line()\n self.currentfspath = None\n\n def write(self, content, **markup):\n self._tw.write(content, **markup)\n\n def write_line(self, line, **markup):\n if not isinstance(line, six.text_type):\n line = six.text_type(line, errors=\"replace\")\n self.ensure_newline()\n self._tw.line(line, **markup)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.rewrite_TerminalReporter.rewrite.self__tw_write_r_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.rewrite_TerminalReporter.rewrite.self__tw_write_r_lin", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 328, "end_line": 344, "span_ids": ["TerminalReporter.rewrite"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def rewrite(self, line, **markup):\n \"\"\"\n Rewinds the terminal cursor to the beginning and writes the given line.\n\n :kwarg erase: if True, will also add spaces until the full terminal width to ensure\n previous lines are properly erased.\n\n The rest of the keyword arguments are markup instructions.\n \"\"\"\n erase = markup.pop(\"erase\", False)\n if erase:\n fill_count = self._tw.fullwidth - len(line) - 1\n fill = \" \" * fill_count\n else:\n fill = \"\"\n line = str(line)\n self._tw.write(\"\\r\" + line + fill, **markup)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_sep_TerminalReporter.pytest_internalerror.return.1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.write_sep_TerminalReporter.pytest_internalerror.return.1", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 346, "end_line": 359, "span_ids": ["TerminalReporter.write_sep", "TerminalReporter.pytest_internalerror", "TerminalReporter.section", "TerminalReporter.line"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def write_sep(self, sep, title=None, **markup):\n self.ensure_newline()\n self._tw.sep(sep, title, **markup)\n\n def section(self, title, sep=\"=\", **kw):\n self._tw.sep(sep, title, **kw)\n\n def line(self, msg, **kw):\n self._tw.line(msg, **kw)\n\n def pytest_internalerror(self, excrepr):\n for line in six.text_type(excrepr).split(\"\\n\"):\n self.write_line(\"INTERNALERROR> \" + line)\n return 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_warning_captured_TerminalReporter.pytest_warning_captured.warnings_append_warning_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_warning_captured_TerminalReporter.pytest_warning_captured.warnings_append_warning_r", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 361, "end_line": 373, "span_ids": ["TerminalReporter.pytest_warning_captured"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def pytest_warning_captured(self, warning_message, item):\n # from _pytest.nodes import get_fslocation_from_item\n from _pytest.warnings import warning_record_to_str\n\n warnings = self.stats.setdefault(\"warnings\", [])\n fslocation = warning_message.filename, warning_message.lineno\n message = warning_record_to_str(warning_message)\n\n nodeid = item.nodeid if item is not None else \"\"\n warning_report = WarningReport(\n fslocation=fslocation, message=message, nodeid=nodeid\n )\n warnings.append(warning_report)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_plugin_registered_TerminalReporter.pytest_runtest_logstart.if_self_showlongtestinfo_.elif_self_showfspath_.self_write_fspath_result_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_plugin_registered_TerminalReporter.pytest_runtest_logstart.if_self_showlongtestinfo_.elif_self_showfspath_.self_write_fspath_result_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 375, "end_line": 394, "span_ids": ["TerminalReporter.pytest_plugin_registered", "TerminalReporter.pytest_runtest_logstart", "TerminalReporter.pytest_deselected"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def pytest_plugin_registered(self, plugin):\n if self.config.option.traceconfig:\n msg = \"PLUGIN registered: %s\" % (plugin,)\n # XXX this event may happen during setup/teardown time\n # which unfortunately captures our output here\n # which garbles our output if we use self.write_line\n self.write_line(msg)\n\n def pytest_deselected(self, items):\n self.stats.setdefault(\"deselected\", []).extend(items)\n\n def pytest_runtest_logstart(self, nodeid, location):\n # ensure that the path is printed before the\n # 1st test of a module starts running\n if self.showlongtestinfo:\n line = self._locationline(nodeid, *location)\n self.write_ensure_prefix(line, \"\")\n elif self.showfspath:\n fsid = nodeid.split(\"::\")[0]\n self.write_fspath_result(fsid, \"\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logreport_TerminalReporter.pytest_runtest_logreport.if_self_verbosity_0_.else_.if_not_running_xdist_.else_.self.currentfspath._2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logreport_TerminalReporter.pytest_runtest_logreport.if_self_verbosity_0_.else_.if_not_running_xdist_.else_.self.currentfspath._2", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 396, "end_line": 445, "span_ids": ["TerminalReporter.pytest_runtest_logreport"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def pytest_runtest_logreport(self, report):\n self._tests_ran = True\n rep = report\n res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)\n category, letter, word = res\n if isinstance(word, tuple):\n word, markup = word\n else:\n markup = None\n self.stats.setdefault(category, []).append(rep)\n if not letter and not word:\n # probably passed setup/teardown\n return\n running_xdist = hasattr(rep, \"node\")\n if markup is None:\n was_xfail = hasattr(report, \"wasxfail\")\n if rep.passed and not was_xfail:\n markup = {\"green\": True}\n elif rep.passed and was_xfail:\n markup = {\"yellow\": True}\n elif rep.failed:\n markup = {\"red\": True}\n elif rep.skipped:\n markup = {\"yellow\": True}\n else:\n markup = {}\n if self.verbosity <= 0:\n if not running_xdist and self.showfspath:\n self.write_fspath_result(rep.nodeid, letter, **markup)\n else:\n self._tw.write(letter, **markup)\n else:\n self._progress_nodeids_reported.add(rep.nodeid)\n line = self._locationline(rep.nodeid, *rep.location)\n if not running_xdist:\n self.write_ensure_prefix(line, word, **markup)\n if self._show_progress_info:\n self._write_progress_information_filling_space()\n else:\n self.ensure_newline()\n self._tw.write(\"[%s]\" % rep.node.gateway.id)\n if self._show_progress_info:\n self._tw.write(\n self._get_progress_information_message() + \" 
\", cyan=True\n )\n else:\n self._tw.write(\" \")\n self._tw.write(word, **markup)\n self._tw.write(\" \" + line)\n self.currentfspath = -2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logfinish_TerminalReporter.pytest_runtest_logfinish.if_self_verbosity_0_an.if_is_last_item_.else_.if_past_edge_.self__tw_write_msg_n_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_runtest_logfinish_TerminalReporter.pytest_runtest_logfinish.if_self_verbosity_0_an.if_is_last_item_.else_.if_past_edge_.self__tw_write_msg_n_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 447, "end_line": 466, "span_ids": ["TerminalReporter.pytest_runtest_logfinish"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def pytest_runtest_logfinish(self, nodeid):\n if self.verbosity <= 0 and self._show_progress_info:\n if self._show_progress_info == \"count\":\n num_tests = self._session.testscollected\n progress_length = len(\" [{}/{}]\".format(str(num_tests), str(num_tests)))\n else:\n progress_length = len(\" [100%]\")\n\n self._progress_nodeids_reported.add(nodeid)\n is_last_item = (\n len(self._progress_nodeids_reported) == self._session.testscollected\n )\n if is_last_item:\n self._write_progress_information_filling_space()\n else:\n w = self._width_of_current_line\n past_edge = w + progress_length + 1 >= self._screen_width\n if past_edge:\n msg = self._get_progress_information_message()\n self._tw.write(msg + \"\\n\", cyan=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._get_progress_information_message_TerminalReporter._get_progress_information_message.if_self__show_progress_in.else_.return._100_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._get_progress_information_message_TerminalReporter._get_progress_information_message.if_self__show_progress_in.else_.return._100_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 468, "end_line": 481, "span_ids": ["TerminalReporter._get_progress_information_message"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _get_progress_information_message(self):\n collected = self._session.testscollected\n if 
self._show_progress_info == \"count\":\n if collected:\n progress = self._progress_nodeids_reported\n counter_format = \"{{:{}d}}\".format(len(str(collected)))\n format_string = \" [{}/{{}}]\".format(counter_format)\n return format_string.format(len(progress), collected)\n return \" [ {} / {} ]\".format(collected, collected)\n else:\n if collected:\n progress = len(self._progress_nodeids_reported) * 100 // collected\n return \" [{:3d}%]\".format(progress)\n return \" [100%]\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_progress_information_filling_space_TerminalReporter.pytest_collectreport.if_self_isatty_.self_report_collect_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_progress_information_filling_space_TerminalReporter.pytest_collectreport.if_self_isatty_.self_report_collect_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 483, "end_line": 514, "span_ids": ["TerminalReporter.pytest_collection", "TerminalReporter._width_of_current_line", "TerminalReporter._write_progress_information_filling_space", "TerminalReporter.pytest_collectreport"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _write_progress_information_filling_space(self):\n msg = self._get_progress_information_message()\n w = self._width_of_current_line\n fill = self._tw.fullwidth - w - 1\n self.write(msg.rjust(fill), cyan=True)\n\n @property\n def _width_of_current_line(self):\n \"\"\"Return the width of current line, using the superior implementation of py-1.6 when available\"\"\"\n try:\n return self._tw.width_of_current_line\n except AttributeError:\n # py < 1.6.0\n return self._tw.chars_on_current_line\n\n def pytest_collection(self):\n if self.isatty:\n if self.config.option.verbose >= 0:\n self.write(\"collecting ... \", bold=True)\n self._collect_report_last_write = time.time()\n elif self.config.option.verbose >= 1:\n self.write(\"collecting ... 
\", bold=True)\n\n def pytest_collectreport(self, report):\n if report.failed:\n self.stats.setdefault(\"error\", []).append(report)\n elif report.skipped:\n self.stats.setdefault(\"skipped\", []).append(report)\n items = [x for x in report.result if isinstance(x, pytest.Item)]\n self._numcollected += len(items)\n if self.isatty:\n self.report_collect()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.report_collect_TerminalReporter.report_collect.if_self_isatty_.else_.self_write_line_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.report_collect_TerminalReporter.report_collect.if_self_isatty_.else_.self_write_line_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 516, "end_line": 554, "span_ids": ["TerminalReporter.report_collect"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def report_collect(self, final=False):\n if self.config.option.verbose < 0:\n return\n\n if not final:\n # Only write \"collecting\" report every 0.5s.\n t = time.time()\n if (\n self._collect_report_last_write is not None\n and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION\n ):\n return\n self._collect_report_last_write = t\n\n errors = len(self.stats.get(\"error\", []))\n skipped = len(self.stats.get(\"skipped\", []))\n deselected = len(self.stats.get(\"deselected\", []))\n selected = self._numcollected - errors - skipped - deselected\n if final:\n line = \"collected \"\n else:\n line = \"collecting \"\n line += (\n str(self._numcollected) + \" item\" + (\"\" if self._numcollected == 1 else \"s\")\n )\n if errors:\n line += \" / %d errors\" % errors\n if deselected:\n line += \" / %d deselected\" % deselected\n if skipped:\n line += \" / %d skipped\" % skipped\n if self._numcollected > selected > 0:\n line += \" / %d selected\" % selected\n if self.isatty:\n self.rewrite(line, bold=True, erase=True)\n if final:\n self.write(\"\\n\")\n else:\n self.write_line(line)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionstart_TerminalReporter.pytest_sessionstart.self__write_report_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionstart_TerminalReporter.pytest_sessionstart.self__write_report_lines_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 556, "end_line": 583, "span_ids": ["TerminalReporter.pytest_sessionstart"], "tokens": 269}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n @pytest.hookimpl(trylast=True)\n def pytest_sessionstart(self, session):\n self._session = session\n self._sessionstarttime = time.time()\n if not self.showheader:\n return\n self.write_sep(\"=\", \"test session starts\", bold=True)\n verinfo = platform.python_version()\n msg = \"platform %s -- Python %s\" % (sys.platform, verinfo)\n if hasattr(sys, \"pypy_version_info\"):\n verinfo = \".\".join(map(str, sys.pypy_version_info[:3]))\n msg += \"[pypy-%s-%s]\" % (verinfo, sys.pypy_version_info[3])\n msg += \", pytest-%s, py-%s, pluggy-%s\" % (\n pytest.__version__,\n py.__version__,\n pluggy.__version__,\n )\n if (\n self.verbosity > 0\n or self.config.option.debug\n or getattr(self.config.option, \"pastebin\", None)\n ):\n msg += \" -- \" + str(sys.executable)\n self.write_line(msg)\n lines = self.config.hook.pytest_report_header(\n config=self.config, startdir=self.startdir\n )\n self._write_report_lines_from_hooks(lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_report_lines_from_hooks_TerminalReporter.pytest_report_header.return.result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._write_report_lines_from_hooks_TerminalReporter.pytest_report_header.return.result", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 585, "end_line": 605, "span_ids": ["TerminalReporter.pytest_report_header", "TerminalReporter._write_report_lines_from_hooks"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _write_report_lines_from_hooks(self, lines):\n lines.reverse()\n for line in collapse(lines):\n self.write_line(line)\n\n def pytest_report_header(self, config):\n line = \"rootdir: %s\" % config.rootdir\n\n if config.inifile:\n line += \", inifile: \" + config.rootdir.bestrelpath(config.inifile)\n\n testpaths = config.getini(\"testpaths\")\n if testpaths and config.args == testpaths:\n rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths]\n line += \", testpaths: {}\".format(\", \".join(rel_paths))\n result = [line]\n\n plugininfo = config.pluginmanager.list_plugin_distinfo()\n if plugininfo:\n result.append(\"plugins: %s\" % \", \".join(_plugin_nameversions(plugininfo)))\n return result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_collection_finish_TerminalReporter.pytest_collection_finish.None_1.if_self_stats_get_failed.for_rep_in_self_stats_get.rep_toterminal_self__tw_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_collection_finish_TerminalReporter.pytest_collection_finish.None_1.if_self_stats_get_failed.for_rep_in_self_stats_get.rep_toterminal_self__tw_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 607, "end_line": 622, "span_ids": ["TerminalReporter.pytest_collection_finish"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def pytest_collection_finish(self, session):\n self.report_collect(True)\n\n if self.config.getoption(\"collectonly\"):\n self._printcollecteditems(session.items)\n\n lines = self.config.hook.pytest_report_collectionfinish(\n config=self.config, startdir=self.startdir, items=session.items\n )\n self._write_report_lines_from_hooks(lines)\n\n if self.config.getoption(\"collectonly\"):\n if self.stats.get(\"failed\"):\n self._tw.sep(\"!\", \"collection failures\")\n for rep in self.stats.get(\"failed\"):\n rep.toterminal(self._tw)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._printcollecteditems_TerminalReporter._printcollecteditems.for_item_in_items_.for_col_in_needed_collect.if_self_config_option_ver.if_hasattr_col__obj_a.for_line_in_col__obj___do.self__tw_line_s_s_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._printcollecteditems_TerminalReporter._printcollecteditems.for_item_in_items_.for_col_in_needed_collect.if_self_config_option_ver.if_hasattr_col__obj_a.for_line_in_col__obj___do.self__tw_line_s_s_i", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 624, "end_line": 657, "span_ids": ["TerminalReporter._printcollecteditems"], "tokens": 338}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _printcollecteditems(self, items):\n # to print out items and their parent collectors\n # we take care to leave out Instances aka ()\n # because later versions are going to get rid of them anyway\n if self.config.option.verbose < 0:\n if self.config.option.verbose < -1:\n counts = {}\n for item in items:\n name = item.nodeid.split(\"::\", 1)[0]\n counts[name] = counts.get(name, 0) + 1\n for name, count in sorted(counts.items()):\n self._tw.line(\"%s: %d\" % (name, count))\n else:\n 
for item in items:\n self._tw.line(item.nodeid)\n return\n stack = []\n indent = \"\"\n for item in items:\n needed_collectors = item.listchain()[1:] # strip root node\n while stack:\n if stack == needed_collectors[: len(stack)]:\n break\n stack.pop()\n for col in needed_collectors[len(stack) :]:\n stack.append(col)\n if col.name == \"()\": # Skip Instances.\n continue\n indent = (len(stack) - 1) * \" \"\n self._tw.line(\"%s%s\" % (indent, col))\n if self.config.option.verbose >= 1:\n if hasattr(col, \"_obj\") and col._obj.__doc__:\n for line in col._obj.__doc__.strip().splitlines():\n self._tw.line(\"%s%s\" % (indent + \" \", line.strip()))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionfinish_TerminalReporter.pytest_sessionfinish.self_summary_stats_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_sessionfinish_TerminalReporter.pytest_sessionfinish.self_summary_stats_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 659, "end_line": 678, "span_ids": ["TerminalReporter.pytest_sessionfinish"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_sessionfinish(self, exitstatus):\n outcome = yield\n outcome.get_result()\n self._tw.line(\"\")\n summary_exit_codes = (\n EXIT_OK,\n EXIT_TESTSFAILED,\n EXIT_INTERRUPTED,\n EXIT_USAGEERROR,\n EXIT_NOTESTSCOLLECTED,\n )\n if exitstatus in summary_exit_codes:\n self.config.hook.pytest_terminal_summary(\n terminalreporter=self, exitstatus=exitstatus, config=self.config\n )\n if exitstatus == EXIT_INTERRUPTED:\n self._report_keyboardinterrupt()\n del self._keyboardinterrupt_memo\n self.summary_stats()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_terminal_summary_TerminalReporter.pytest_unconfigure.if_hasattr_self__keyboa.self__report_keyboardinte": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.pytest_terminal_summary_TerminalReporter.pytest_unconfigure.if_hasattr_self__keyboa.self__report_keyboardinte", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 680, "end_line": 696, "span_ids": ["TerminalReporter.pytest_terminal_summary", "TerminalReporter.pytest_unconfigure", "TerminalReporter.pytest_keyboard_interrupt"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_terminal_summary(self):\n self.summary_errors()\n self.summary_failures()\n self.summary_warnings()\n self.summary_passes()\n yield\n self.short_test_summary()\n # Display any extra warnings from teardown here (if any).\n self.summary_warnings()\n\n def pytest_keyboard_interrupt(self, excinfo):\n self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)\n\n def pytest_unconfigure(self):\n if hasattr(self, \"_keyboardinterrupt_memo\"):\n self._report_keyboardinterrupt()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._report_keyboardinterrupt_TerminalReporter._report_keyboardinterrupt.if_KeyboardInterrupt_in.if_self_config_option_ful.else_.self__tw_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._report_keyboardinterrupt_TerminalReporter._report_keyboardinterrupt.if_KeyboardInterrupt_in.if_self_config_option_ful.else_.self__tw_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 698, "end_line": 710, "span_ids": ["TerminalReporter._report_keyboardinterrupt"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _report_keyboardinterrupt(self):\n excrepr = self._keyboardinterrupt_memo\n msg = excrepr.reprcrash.message\n self.write_sep(\"!\", msg)\n if \"KeyboardInterrupt\" in msg:\n if self.config.option.fulltrace:\n excrepr.toterminal(self._tw)\n else:\n excrepr.reprcrash.toterminal(self._tw)\n self._tw.line(\n \"(to show a full traceback on KeyboardInterrupt use --fulltrace)\",\n yellow=True,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._locationline_TerminalReporter._locationline.return.res_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._locationline_TerminalReporter._locationline.return.res_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 712, "end_line": 732, "span_ids": ["TerminalReporter._locationline"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _locationline(self, nodeid, fspath, lineno, domain):\n def mkrel(nodeid):\n line = 
self.config.cwd_relative_nodeid(nodeid)\n if domain and line.endswith(domain):\n line = line[: -len(domain)]\n values = domain.split(\"[\")\n values[0] = values[0].replace(\".\", \"::\") # don't replace '.' in params\n line += \"[\".join(values)\n return line\n\n # collect_fspath comes from testid which has a \"/\"-normalized path\n\n if fspath:\n res = mkrel(nodeid)\n if self.verbosity >= 2 and nodeid.split(\"::\")[0] != fspath.replace(\n \"\\\\\", nodes.SEP\n ):\n res += \" <- \" + self.startdir.bestrelpath(fspath)\n else:\n res = \"[location]\"\n return res + \" \"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._getfailureheadline_TerminalReporter.getreports.return.values": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._getfailureheadline_TerminalReporter.getreports.return.values", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 734, "end_line": 757, "span_ids": ["TerminalReporter._getcrashline", "TerminalReporter.getreports", "TerminalReporter._getfailureheadline"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _getfailureheadline(self, rep):\n head_line = rep.head_line\n if head_line:\n return head_line\n return \"test session\" # XXX?\n\n def _getcrashline(self, rep):\n try:\n return str(rep.longrepr.reprcrash)\n except AttributeError:\n try:\n return str(rep.longrepr)[:50]\n except AttributeError:\n return \"\"\n\n #\n # summaries for sessionfinish\n #\n def getreports(self, name):\n values = []\n for x in self.stats.get(name, []):\n if not hasattr(x, \"_pdbshown\"):\n values.append(x)\n return values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_warnings_TerminalReporter.summary_warnings.if_self_hasopt_w_.self__tw_line_Docs_h": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_warnings_TerminalReporter.summary_warnings.if_self_hasopt_w_.self__tw_line_Docs_h", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 759, "end_line": 795, "span_ids": ["TerminalReporter.summary_warnings"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def summary_warnings(self):\n if self.hasopt(\"w\"):\n all_warnings = 
self.stats.get(\"warnings\")\n if not all_warnings:\n return\n\n final = hasattr(self, \"_already_displayed_warnings\")\n if final:\n warning_reports = all_warnings[self._already_displayed_warnings :]\n else:\n warning_reports = all_warnings\n self._already_displayed_warnings = len(warning_reports)\n if not warning_reports:\n return\n\n reports_grouped_by_message = collections.OrderedDict()\n for wr in warning_reports:\n reports_grouped_by_message.setdefault(wr.message, []).append(wr)\n\n title = \"warnings summary (final)\" if final else \"warnings summary\"\n self.write_sep(\"=\", title, yellow=True, bold=False)\n for message, warning_reports in reports_grouped_by_message.items():\n has_any_location = False\n for w in warning_reports:\n location = w.get_location(self.config)\n if location:\n self._tw.line(str(location))\n has_any_location = True\n if has_any_location:\n lines = message.splitlines()\n indented = \"\\n\".join(\" \" + x for x in lines)\n message = indented.rstrip()\n else:\n message = message.rstrip()\n self._tw.line(message)\n self._tw.line()\n self._tw.line(\"-- Docs: https://docs.pytest.org/en/latest/warnings.html\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_passes_TerminalReporter.print_teardown_sections.for_secname_content_in_r.if_teardown_in_secname_.self__tw_line_content_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_passes_TerminalReporter.print_teardown_sections.for_secname_content_in_r.if_teardown_in_secname_.self__tw_line_content_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 797, "end_line": 821, "span_ids": ["TerminalReporter.print_teardown_sections", "TerminalReporter.summary_passes"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def summary_passes(self):\n if self.config.option.tbstyle != \"no\":\n if self.hasopt(\"P\"):\n reports = self.getreports(\"passed\")\n if not reports:\n return\n self.write_sep(\"=\", \"PASSES\")\n for rep in reports:\n if rep.sections:\n msg = self._getfailureheadline(rep)\n self.write_sep(\"_\", msg, green=True, bold=True)\n self._outrep_summary(rep)\n\n def print_teardown_sections(self, rep):\n showcapture = self.config.option.showcapture\n if showcapture == \"no\":\n return\n for secname, content in rep.sections:\n if showcapture != \"all\" and showcapture not in secname:\n continue\n if \"teardown\" in secname:\n self._tw.sep(\"-\", secname)\n if content[-1:] == \"\\n\":\n content = content[:-1]\n self._tw.line(content)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_failures_TerminalReporter.summary_failures.if_self_config_option_tbs.if_self_config_option_tbs.else_.for_rep_in_reports_.for_report_in_teardown_se.self_print_teardown_secti": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_failures_TerminalReporter.summary_failures.if_self_config_option_tbs.if_self_config_option_tbs.else_.for_rep_in_reports_.for_report_in_teardown_se.self_print_teardown_secti", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 823, "end_line": 844, "span_ids": ["TerminalReporter.summary_failures"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def summary_failures(self):\n if self.config.option.tbstyle != \"no\":\n reports = self.getreports(\"failed\")\n if not reports:\n return\n self.write_sep(\"=\", \"FAILURES\")\n if self.config.option.tbstyle == \"line\":\n for rep in reports:\n line = self._getcrashline(rep)\n self.write_line(line)\n else:\n teardown_sections = {}\n for report in self.getreports(\"\"):\n if report.when == \"teardown\":\n teardown_sections.setdefault(report.nodeid, []).append(report)\n\n for rep in reports:\n msg = self._getfailureheadline(rep)\n self.write_sep(\"_\", msg, red=True, bold=True)\n self._outrep_summary(rep)\n for report in teardown_sections.get(rep.nodeid, []):\n self.print_teardown_sections(report)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_errors_TerminalReporter.summary_errors.if_self_config_option_tbs.for_rep_in_self_stats_er.self__outrep_summary_rep_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.summary_errors_TerminalReporter.summary_errors.if_self_config_option_tbs.for_rep_in_self_stats_er.self__outrep_summary_rep_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 846, "end_line": 859, "span_ids": ["TerminalReporter.summary_errors"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def summary_errors(self):\n if self.config.option.tbstyle != \"no\":\n reports = self.getreports(\"error\")\n if not reports:\n return\n self.write_sep(\"=\", \"ERRORS\")\n for rep in self.stats[\"error\"]:\n msg = self._getfailureheadline(rep)\n if rep.when == \"collect\":\n msg = \"ERROR collecting \" + msg\n else:\n msg = \"ERROR at %s of %s\" % (rep.when, msg)\n self.write_sep(\"_\", msg, red=True, bold=True)\n 
self._outrep_summary(rep)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._outrep_summary_TerminalReporter.summary_stats.if_self_verbosity_1_.self_write_line_msg_ma": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter._outrep_summary_TerminalReporter.summary_stats.if_self_verbosity_1_.self_write_line_msg_ma", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 861, "end_line": 883, "span_ids": ["TerminalReporter._outrep_summary", "TerminalReporter.summary_stats"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def _outrep_summary(self, rep):\n rep.toterminal(self._tw)\n showcapture = self.config.option.showcapture\n if showcapture == \"no\":\n return\n for secname, content in rep.sections:\n if showcapture != \"all\" and showcapture not in secname:\n continue\n self._tw.sep(\"-\", secname)\n if content[-1:] == \"\\n\":\n content = content[:-1]\n self._tw.line(content)\n\n def summary_stats(self):\n session_duration = time.time() - self._sessionstarttime\n (line, color) = build_summary_stats_line(self.stats)\n msg = \"%s in %.2f seconds\" % (line, session_duration)\n markup = {color: True, \"bold\": True}\n\n if self.verbosity >= 0:\n self.write_sep(\"=\", msg, **markup)\n if self.verbosity == -1:\n self.write_line(msg, **markup)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary_TerminalReporter.short_test_summary.show_xpassed.for_rep_in_xpassed_.lines_append_s_s_s_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary_TerminalReporter.short_test_summary.show_xpassed.for_rep_in_xpassed_.lines_append_s_s_s_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 884, "end_line": 911, "span_ids": ["TerminalReporter.short_test_summary"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def short_test_summary(self):\n if not self.reportchars:\n return\n\n def show_simple(stat, lines):\n failed = self.stats.get(stat, [])\n for rep in failed:\n verbose_word = rep._get_verbose_word(self.config)\n pos = _get_pos(self.config, rep)\n lines.append(\"%s %s\" % (verbose_word, pos))\n\n def show_xfailed(lines):\n xfailed = 
self.stats.get(\"xfailed\", [])\n for rep in xfailed:\n verbose_word = rep._get_verbose_word(self.config)\n pos = _get_pos(self.config, rep)\n lines.append(\"%s %s\" % (verbose_word, pos))\n reason = rep.wasxfail\n if reason:\n lines.append(\" \" + str(reason))\n\n def show_xpassed(lines):\n xpassed = self.stats.get(\"xpassed\", [])\n for rep in xpassed:\n verbose_word = rep._get_verbose_word(self.config)\n pos = _get_pos(self.config, rep)\n reason = rep.wasxfail\n lines.append(\"%s %s %s\" % (verbose_word, pos, reason))\n # ... other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary.show_skipped_TerminalReporter.short_test_summary.show_skipped.for_num_fspath_lineno_.if_lineno_is_not_None_.else_.lines_append_s_d_s_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary.show_skipped_TerminalReporter.short_test_summary.show_skipped.for_num_fspath_lineno_.if_lineno_is_not_None_.else_.lines_append_s_d_s_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 917, "end_line": 932, "span_ids": ["TerminalReporter.short_test_summary"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def short_test_summary(self):\n # ... other code\n\n def show_skipped(lines):\n skipped = self.stats.get(\"skipped\", [])\n fskips = _folded_skips(skipped) if skipped else []\n if not fskips:\n return\n verbose_word = skipped[0]._get_verbose_word(self.config)\n for num, fspath, lineno, reason in fskips:\n if reason.startswith(\"Skipped: \"):\n reason = reason[9:]\n if lineno is not None:\n lines.append(\n \"%s [%d] %s:%d: %s\"\n % (verbose_word, num, fspath, lineno + 1, reason)\n )\n else:\n lines.append(\"%s [%d] %s: %s\" % (verbose_word, num, fspath, reason))\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__folded_skips__folded_skips.return.values": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__folded_skips__folded_skips.return.values", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1015, "end_line": 1034, "span_ids": ["_folded_skips"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _folded_skips(skipped):\n d = {}\n for event in skipped:\n key = event.longrepr\n assert len(key) == 3, (event, key)\n keywords = getattr(event, \"keywords\", {})\n # folding reports with global pytestmark variable\n # this is workaround, because for now we cannot identify the scope of a skip marker\n # TODO: revisit after marks scope would be fixed\n if (\n event.when == \"setup\"\n and \"skip\" in keywords\n and \"pytestmark\" not in keywords\n ):\n key = (key[0], None, key[2])\n d.setdefault(key, []).append(event)\n values = []\n for key, events in d.items():\n values.append((len(events),) + key)\n return values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_build_summary_stats_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_build_summary_stats_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1037, "end_line": 1086, "span_ids": ["build_summary_stats_line", "_plugin_nameversions"], "tokens": 347}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def build_summary_stats_line(stats):\n known_types = (\n \"failed passed skipped deselected xfailed xpassed warnings error\".split()\n )\n unknown_type_seen = False\n for found_type in stats:\n if found_type not in known_types:\n if found_type: # setup/teardown reports have an empty key, ignore them\n known_types.append(found_type)\n unknown_type_seen = True\n parts = []\n for key in known_types:\n reports = stats.get(key, None)\n if reports:\n count = sum(\n 1 for rep in reports if getattr(rep, \"count_towards_summary\", True)\n )\n parts.append(\"%d %s\" % (count, key))\n\n if parts:\n line = \", \".join(parts)\n else:\n line = \"no tests ran\"\n\n if \"failed\" in stats or \"error\" in stats:\n color = \"red\"\n elif \"warnings\" in stats or unknown_type_seen:\n color = \"yellow\"\n elif \"passed\" in stats:\n color = \"green\"\n else:\n color = \"yellow\"\n\n return line, color\n\n\ndef 
_plugin_nameversions(plugininfo):\n values = []\n for plugin, dist in plugininfo:\n # gets us name and version!\n name = \"{dist.project_name}-{dist.version}\".format(dist=dist)\n # questionable convenience, but it keeps things short\n if name.startswith(\"pytest-\"):\n name = name[7:]\n # we decided to print python package names\n # they can have more than one plugin\n if name not in values:\n values.append(name)\n return values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py__support_for_providing_TempPathFactory.mktemp.return.p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py__support_for_providing_TempPathFactory.mktemp.return.p", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 58, "span_ids": ["docstring", "TempPathFactory.mktemp", "TempPathFactory", "imports", "TempPathFactory.from_config"], "tokens": 396}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" support for providing temporary directories to test functions. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport tempfile\nimport warnings\n\nimport attr\nimport py\nimport six\n\nimport pytest\nfrom .pathlib import ensure_reset_dir\nfrom .pathlib import LOCK_TIMEOUT\nfrom .pathlib import make_numbered_dir\nfrom .pathlib import make_numbered_dir_with_cleanup\nfrom .pathlib import Path\nfrom _pytest.monkeypatch import MonkeyPatch\n\n\n@attr.s\nclass TempPathFactory(object):\n \"\"\"Factory for temporary directories under the common base temp directory.\n\n The base directory can be configured using the ``--basetemp`` option.\"\"\"\n\n _given_basetemp = attr.ib(\n # using os.path.abspath() to get absolute path instead of resolve() as it\n # does not work the same in all platforms (see #4427)\n # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012)\n converter=attr.converters.optional(\n lambda p: Path(os.path.abspath(six.text_type(p)))\n )\n )\n _trace = attr.ib()\n _basetemp = attr.ib(default=None)\n\n @classmethod\n def from_config(cls, config):\n \"\"\"\n :param config: a pytest configuration\n \"\"\"\n return cls(\n given_basetemp=config.option.basetemp, trace=config.trace.get(\"tmpdir\")\n )\n\n def mktemp(self, basename, numbered=True):\n \"\"\"makes a temporary directory managed by the factory\"\"\"\n if not numbered:\n p = self.getbasetemp().joinpath(basename)\n p.mkdir()\n else:\n p = make_numbered_dir(root=self.getbasetemp(), prefix=basename)\n self._trace(\"mktemp\", p)\n return p", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempPathFactory.getbasetemp_TempPathFactory.getbasetemp.if_self__basetemp_is_None.else_.return.self__basetemp": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempPathFactory.getbasetemp_TempPathFactory.getbasetemp.if_self__basetemp_is_None.else_.return.self__basetemp", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 60, "end_line": 83, "span_ids": ["TempPathFactory.getbasetemp"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass TempPathFactory(object):\n\n def getbasetemp(self):\n \"\"\" return base temporary directory. \"\"\"\n if self._basetemp is None:\n if self._given_basetemp is not None:\n basetemp = self._given_basetemp\n ensure_reset_dir(basetemp)\n basetemp = basetemp.resolve()\n else:\n from_env = os.environ.get(\"PYTEST_DEBUG_TEMPROOT\")\n temproot = Path(from_env or tempfile.gettempdir()).resolve()\n user = get_user() or \"unknown\"\n # use a sub-directory in the temproot to speed-up\n # make_numbered_dir() call\n rootdir = temproot.joinpath(\"pytest-of-{}\".format(user))\n rootdir.mkdir(exist_ok=True)\n basetemp = make_numbered_dir_with_cleanup(\n prefix=\"pytest-\", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT\n )\n assert basetemp is not None\n self._basetemp = t = basetemp\n self._trace(\"new basetemp\", t)\n return t\n else:\n return self._basetemp", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory_TempdirFactory.ensuretemp.return.self_getbasetemp_ensure": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory_TempdirFactory.ensuretemp.return.self_getbasetemp_ensure", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 86, "end_line": 106, "span_ids": ["TempdirFactory.ensuretemp", "TempdirFactory"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass TempdirFactory(object):\n \"\"\"\n backward comptibility wrapper that implements\n :class:``py.path.local`` for :class:``TempPathFactory``\n \"\"\"\n\n _tmppath_factory = attr.ib()\n\n def ensuretemp(self, string, dir=1):\n \"\"\" (deprecated) return temporary directory path with\n the given string as the trailing part. 
It is usually\n better to use the 'tmpdir' function argument which\n provides an empty unique-per-test-invocation directory\n and is guaranteed to be empty.\n \"\"\"\n # py.log._apiwarn(\">1.1\", \"use tmpdir function argument\")\n from .deprecated import PYTEST_ENSURETEMP\n\n warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)\n return self.getbasetemp().ensure(string, dir=dir)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory.mktemp_get_user.try_.except_ImportError_KeyE.return.None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_TempdirFactory.mktemp_get_user.try_.except_ImportError_KeyE.return.None", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 129, "span_ids": ["TempdirFactory.mktemp", "get_user", "TempdirFactory.getbasetemp"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@attr.s\nclass TempdirFactory(object):\n\n def mktemp(self, basename, numbered=True):\n \"\"\"Create a subdirectory of the base temporary directory and return it.\n If ``numbered``, ensure the directory is unique by adding a number\n prefix greater than any existing one.\n \"\"\"\n return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())\n\n def getbasetemp(self):\n \"\"\"backward compat wrapper for ``_tmppath_factory.getbasetemp``\"\"\"\n return py.path.local(self._tmppath_factory.getbasetemp().resolve())\n\n\ndef get_user():\n \"\"\"Return the current user name, or None if getuser() does not work\n in the current environment (see #1010).\n \"\"\"\n import getpass\n\n try:\n return getpass.getuser()\n except (ImportError, KeyError):\n return None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_pytest_configure_pytest_configure.mp_setattr_pytest_ensur": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_pytest_configure_pytest_configure.mp_setattr_pytest_ensur", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 132, "end_line": 145, "span_ids": ["pytest_configure"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n \"\"\"Create a TempdirFactory and attach it to the config object.\n\n This is to comply with existing plugins which expect the handler to be\n available at pytest_configure time, but ideally should be moved entirely\n to the tmpdir_factory 
session fixture.\n \"\"\"\n mp = MonkeyPatch()\n tmppath_handler = TempPathFactory.from_config(config)\n t = TempdirFactory(tmppath_handler)\n config._cleanup.append(mp.undo)\n mp.setattr(config, \"_tmp_path_factory\", tmppath_handler, raising=False)\n mp.setattr(config, \"_tmpdirhandler\", t, raising=False)\n mp.setattr(pytest, \"ensuretemp\", t.ensuretemp, raising=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_tmpdir_factory_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/tmpdir.py_tmpdir_factory_", "embedding": null, "metadata": {"file_path": "src/_pytest/tmpdir.py", "file_name": "tmpdir.py", "file_type": "text/x-python", "category": "implementation", "start_line": 148, "end_line": 197, "span_ids": ["_mk_tmp", "tmp_path_factory", "tmp_path", "tmpdir_factory", "tmpdir"], "tokens": 323}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture(scope=\"session\")\ndef tmpdir_factory(request):\n \"\"\"Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.\n \"\"\"\n return request.config._tmpdirhandler\n\n\n@pytest.fixture(scope=\"session\")\ndef tmp_path_factory(request):\n \"\"\"Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.\n \"\"\"\n return request.config._tmp_path_factory\n\n\ndef _mk_tmp(request, factory):\n name = request.node.name\n name = re.sub(r\"[\\W]\", \"_\", name)\n MAXVAL = 30\n name = name[:MAXVAL]\n return factory.mktemp(name, numbered=True)\n\n\n@pytest.fixture\ndef tmpdir(tmp_path):\n \"\"\"Return a temporary directory path object\n which is unique to each test function invocation,\n created as a sub directory of the base temporary\n directory. The returned object is a `py.path.local`_\n path object.\n\n .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html\n \"\"\"\n return py.path.local(tmp_path)\n\n\n@pytest.fixture\ndef tmp_path(request, tmp_path_factory):\n \"\"\"Return a temporary directory path object\n which is unique to each test function invocation,\n created as a sub directory of the base temporary\n directory. The returned object is a :class:`pathlib.Path`\n object.\n\n .. 
note::\n\n in python < 3.6 this is a pathlib2.Path\n \"\"\"\n\n return _mk_tmp(request, tmp_path_factory)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__discovery_and_running_pytest_pycollect_makeitem.return.UnitTestCase_name_parent": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__discovery_and_running_pytest_pycollect_makeitem.return.UnitTestCase_name_parent", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["pytest_pycollect_makeitem", "imports", "docstring"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" discovery and running of std-library \"unittest\" style tests. \"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport traceback\n\nimport _pytest._code\nimport pytest\nfrom _pytest.compat import getimfunc\nfrom _pytest.config import hookimpl\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import skip\nfrom _pytest.outcomes import xfail\nfrom _pytest.python import Class\nfrom _pytest.python import Function\n\n\ndef pytest_pycollect_makeitem(collector, name, obj):\n # has unittest been imported and is obj a subclass of its TestCase?\n try:\n if not issubclass(obj, sys.modules[\"unittest\"].TestCase):\n return\n except Exception:\n return\n # yes, so let's collect it\n return UnitTestCase(name, parent=collector)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase_UnitTestCase.collect.if_not_foundsomething_.if_runtest_is_not_None_.if_ut_is_None_or_runtest_.yield_TestCaseFunction_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase_UnitTestCase.collect.if_not_foundsomething_.if_runtest_is_not_None_.if_ut_is_None_or_runtest_.yield_TestCaseFunction_r", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 31, "end_line": 64, "span_ids": ["UnitTestCase", "UnitTestCase.collect"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class UnitTestCase(Class):\n # marker for fixturemanger.getfixtureinfo()\n # to declare that our children do not support funcargs\n nofuncargs = True\n\n def collect(self):\n from unittest import TestLoader\n\n cls = self.obj\n if not getattr(cls, \"__test__\", True):\n return\n\n skipped = getattr(cls, 
\"__unittest_skip__\", False)\n if not skipped:\n self._inject_setup_teardown_fixtures(cls)\n self._inject_setup_class_fixture()\n\n self.session._fixturemanager.parsefactories(self, unittest=True)\n loader = TestLoader()\n foundsomething = False\n for name in loader.getTestCaseNames(self.obj):\n x = getattr(self.obj, name)\n if not getattr(x, \"__test__\", True):\n continue\n funcobj = getimfunc(x)\n yield TestCaseFunction(name, parent=self, callobj=funcobj)\n foundsomething = True\n\n if not foundsomething:\n runtest = getattr(self.obj, \"runTest\", None)\n if runtest is not None:\n ut = sys.modules.get(\"twisted.trial.unittest\", None)\n if ut is None or runtest != ut.TestCase.runTest:\n yield TestCaseFunction(\"runTest\", parent=self)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase._inject_setup_teardown_fixtures_UnitTestCase._inject_setup_teardown_fixtures.if_method_fixture_.cls.__pytest_method_setup.method_fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_UnitTestCase._inject_setup_teardown_fixtures_UnitTestCase._inject_setup_teardown_fixtures.if_method_fixture_.cls.__pytest_method_setup.method_fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 66, "end_line": 79, "span_ids": ["UnitTestCase._inject_setup_teardown_fixtures"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class UnitTestCase(Class):\n # marker for fixturemanger.getfixtureinfo()\n\n def _inject_setup_teardown_fixtures(self, cls):\n \"\"\"Injects a hidden auto-use fixture to invoke setUpClass/setup_method and corresponding\n teardown functions (#517)\"\"\"\n class_fixture = _make_xunit_fixture(\n cls, \"setUpClass\", \"tearDownClass\", scope=\"class\", pass_self=False\n )\n if class_fixture:\n cls.__pytest_class_setup = class_fixture\n\n method_fixture = _make_xunit_fixture(\n cls, \"setup_method\", \"teardown_method\", scope=\"function\", pass_self=True\n )\n if method_fixture:\n cls.__pytest_method_setup = method_fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__make_xunit_fixture__make_xunit_fixture.return.fixture": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__make_xunit_fixture__make_xunit_fixture.return.fixture", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 82, "end_line": 105, "span_ids": ["_make_xunit_fixture"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _make_xunit_fixture(obj, setup_name, teardown_name, scope, pass_self):\n setup = getattr(obj, setup_name, None)\n teardown = getattr(obj, teardown_name, None)\n if setup is None and teardown is None:\n return None\n\n @pytest.fixture(scope=scope, autouse=True)\n def fixture(self, request):\n if getattr(self, \"__unittest_skip__\", None):\n reason = self.__unittest_skip_why__\n pytest.skip(reason)\n if setup is not None:\n if pass_self:\n setup(self, request.function)\n else:\n setup()\n yield\n if teardown is not None:\n if pass_self:\n teardown(self, request.function)\n else:\n teardown()\n\n return fixture", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction_TestCaseFunction.startTest.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction_TestCaseFunction.startTest.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 108, "end_line": 136, "span_ids": ["TestCaseFunction.startTest", "TestCaseFunction.setup", "TestCaseFunction", "TestCaseFunction.teardown", "TestCaseFunction._fix_unittest_skip_decorator"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaseFunction(Function):\n nofuncargs = True\n _excinfo = None\n _testcase = None\n\n def setup(self):\n self._testcase = self.parent.obj(self.name)\n self._fix_unittest_skip_decorator()\n if hasattr(self, \"_request\"):\n self._request._fillfixtures()\n\n def _fix_unittest_skip_decorator(self):\n \"\"\"\n The @unittest.skip decorator calls functools.wraps(self._testcase)\n The call to functools.wraps() fails unless self._testcase\n has a __name__ attribute. 
This is usually automatically supplied\n if the test is a function or method, but we need to add manually\n here.\n\n See issue #1169\n \"\"\"\n if sys.version_info[0] == 2:\n setattr(self._testcase, \"__name__\", self.name)\n\n def teardown(self):\n self._testcase = None\n\n def startTest(self, testcase):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._addexcinfo_TestCaseFunction._addexcinfo.self___dict___setdefault_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._addexcinfo_TestCaseFunction._addexcinfo.self___dict___setdefault_", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 138, "end_line": 169, "span_ids": ["TestCaseFunction._addexcinfo"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaseFunction(Function):\n\n def _addexcinfo(self, rawexcinfo):\n # unwrap potential exception info (see twisted trial support below)\n rawexcinfo = getattr(rawexcinfo, \"_rawexcinfo\", rawexcinfo)\n try:\n excinfo = _pytest._code.ExceptionInfo(rawexcinfo)\n # invoke the attributes to trigger storing the traceback\n # trial causes some issue there\n excinfo.value\n excinfo.traceback\n except TypeError:\n try:\n try:\n values = traceback.format_exception(*rawexcinfo)\n values.insert(\n 0,\n \"NOTE: Incompatible Exception Representation, \"\n \"displaying natively:\\n\\n\",\n )\n fail(\"\".join(values), pytrace=False)\n except (fail.Exception, KeyboardInterrupt):\n raise\n except: # noqa\n fail(\n \"ERROR: Unknown Incompatible Exception \"\n \"representation:\\n%r\" % (rawexcinfo,),\n pytrace=False,\n )\n except KeyboardInterrupt:\n raise\n except fail.Exception:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n self.__dict__.setdefault(\"_excinfo\", []).append(excinfo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.addError_TestCaseFunction.stopTest.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.addError_TestCaseFunction.stopTest.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 171, "end_line": 197, "span_ids": ["TestCaseFunction.addError", "TestCaseFunction.addSkip", "TestCaseFunction.stopTest", "TestCaseFunction.addFailure", "TestCaseFunction.addUnexpectedSuccess", "TestCaseFunction.addExpectedFailure", "TestCaseFunction.addSuccess"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaseFunction(Function):\n\n def addError(self, testcase, rawexcinfo):\n self._addexcinfo(rawexcinfo)\n\n def addFailure(self, testcase, rawexcinfo):\n self._addexcinfo(rawexcinfo)\n\n def addSkip(self, testcase, reason):\n try:\n skip(reason)\n except skip.Exception:\n self._skipped_by_mark = True\n self._addexcinfo(sys.exc_info())\n\n def addExpectedFailure(self, testcase, rawexcinfo, reason=\"\"):\n try:\n xfail(str(reason))\n except xfail.Exception:\n self._addexcinfo(sys.exc_info())\n\n def addUnexpectedSuccess(self, testcase, reason=\"\"):\n self._unexpectedsuccess = reason\n\n def addSuccess(self, testcase):\n pass\n\n def stopTest(self, testcase):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._handle_skip_TestCaseFunction._handle_skip.return.False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction._handle_skip_TestCaseFunction._handle_skip.return.False", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 217, "span_ids": ["TestCaseFunction._handle_skip"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaseFunction(Function):\n\n def _handle_skip(self):\n # implements the skipping machinery (see #2137)\n # analog to pythons Lib/unittest/case.py:run\n testMethod = getattr(self._testcase, self._testcase._testMethodName)\n if getattr(self._testcase.__class__, \"__unittest_skip__\", False) or getattr(\n testMethod, \"__unittest_skip__\", False\n ):\n # If the class or method was skipped.\n skip_why = getattr(\n self._testcase.__class__, \"__unittest_skip_why__\", \"\"\n ) or getattr(testMethod, \"__unittest_skip_why__\", \"\")\n try: # PY3, unittest2 on PY2\n self._testcase._addSkip(self, self._testcase, skip_why)\n except TypeError: # PY2\n if sys.version_info[0] != 2:\n raise\n self._testcase._addSkip(self, skip_why)\n return True\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.runtest_pytest_runtest_makereport.if_isinstance_item_TestC.if_item__excinfo_.try_.except_AttributeError_.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py_TestCaseFunction.runtest_pytest_runtest_makereport.if_isinstance_item_TestC.if_item__excinfo_.try_.except_AttributeError_.pass", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 219, "end_line": 245, "span_ids": ["TestCaseFunction.runtest", "TestCaseFunction._prunetraceback", "pytest_runtest_makereport"], "tokens": 196}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaseFunction(Function):\n\n def runtest(self):\n if self.config.pluginmanager.get_plugin(\"pdbinvoke\") is None:\n self._testcase(result=self)\n else:\n # disables tearDown and cleanups for post mortem debugging (see #1890)\n if self._handle_skip():\n return\n self._testcase.debug()\n\n def _prunetraceback(self, excinfo):\n Function._prunetraceback(self, excinfo)\n traceback = excinfo.traceback.filter(\n lambda x: not x.frame.f_globals.get(\"__unittest\")\n )\n if traceback:\n excinfo.traceback = traceback\n\n\n@hookimpl(tryfirst=True)\ndef pytest_runtest_makereport(item, call):\n if isinstance(item, TestCaseFunction):\n if item._excinfo:\n call.excinfo = item._excinfo.pop(0)\n try:\n del call.result\n except AttributeError:\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__twisted_trial_support_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/unittest.py__twisted_trial_support_", "embedding": null, "metadata": {"file_path": "src/_pytest/unittest.py", "file_name": "unittest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 248, "end_line": 289, "span_ids": ["check_testcase_implements_trial_reporter", "pytest_runtest_protocol", "pytest_runtest_makereport"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# twisted trial support\n\n\n@hookimpl(hookwrapper=True)\ndef pytest_runtest_protocol(item):\n if isinstance(item, TestCaseFunction) and \"twisted.trial.unittest\" in sys.modules:\n ut = sys.modules[\"twisted.python.failure\"]\n Failure__init__ = ut.Failure.__init__\n check_testcase_implements_trial_reporter()\n\n def excstore(\n self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None\n ):\n if exc_value is None:\n self._rawexcinfo = sys.exc_info()\n else:\n if exc_type is None:\n exc_type = type(exc_value)\n self._rawexcinfo = (exc_type, exc_value, exc_tb)\n try:\n Failure__init__(\n self, exc_value, exc_type, exc_tb, captureVars=captureVars\n )\n except TypeError:\n Failure__init__(self, exc_value, exc_type, exc_tb)\n\n ut.Failure.__init__ = excstore\n yield\n ut.Failure.__init__ = Failure__init__\n else:\n yield\n\n\ndef check_testcase_implements_trial_reporter(done=[]):\n if done:\n return\n from zope.interface import classImplements\n from twisted.trial.itrial import IReporter\n\n classImplements(TestCaseFunction, IReporter)\n done.append(1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_attr_PytestDeprecationWarning._": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_attr_PytestDeprecationWarning._", "embedding": null, "metadata": {"file_path": "src/_pytest/warning_types.py", "file_name": "warning_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 49, "span_ids": ["PytestCollectionWarning", "PytestCacheWarning", "PytestConfigWarning", "PytestDeprecationWarning", "imports", "PytestWarning", "PytestAssertRewriteWarning"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import attr\n\n\nclass PytestWarning(UserWarning):\n \"\"\"\n Bases: :class:`UserWarning`.\n\n Base class for all warnings emitted by pytest.\n \"\"\"\n\n\nclass PytestAssertRewriteWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted by the pytest assert rewrite module.\n \"\"\"\n\n\nclass PytestCacheWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted by the cache plugin in various situations.\n \"\"\"\n\n\nclass PytestConfigWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted for configuration issues.\n \"\"\"\n\n\nclass PytestCollectionWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted when pytest is not able to collect a file or symbol in a module.\n \"\"\"\n\n\nclass PytestDeprecationWarning(PytestWarning, DeprecationWarning):\n \"\"\"\n Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`.\n\n Warning class for features that will be removed in a future version.\n \"\"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestExperimentalApiWarning_PytestExperimentalApiWarning.simple.return.cls_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestExperimentalApiWarning_PytestExperimentalApiWarning.simple.return.cls_", "embedding": null, "metadata": {"file_path": "src/_pytest/warning_types.py", "file_name": "warning_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 52, "end_line": 66, "span_ids": ["PytestExperimentalApiWarning.simple", "PytestExperimentalApiWarning"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestExperimentalApiWarning(PytestWarning, FutureWarning):\n \"\"\"\n Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`.\n\n Warning category used to denote experiments in pytest. 
Use sparingly as the API might change or even be\n removed completely in future version\n \"\"\"\n\n @classmethod\n def simple(cls, apiname):\n return cls(\n \"{apiname} is an experimental api that may change over time\".format(\n apiname=apiname\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestUnhandledCoroutineWarning_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warning_types.py_PytestUnhandledCoroutineWarning_", "embedding": null, "metadata": {"file_path": "src/_pytest/warning_types.py", "file_name": "warning_types.py", "file_type": "text/x-python", "category": "implementation", "start_line": 69, "end_line": 112, "span_ids": ["PytestUnknownMarkWarning", "impl", "UnformattedWarning", "UnformattedWarning.format", "RemovedInPytest4Warning", "PytestUnhandledCoroutineWarning"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class PytestUnhandledCoroutineWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted when pytest encounters a test function which is a coroutine,\n but it was not handled by any async-aware plugin. Coroutine test functions\n are not natively supported.\n \"\"\"\n\n\nclass PytestUnknownMarkWarning(PytestWarning):\n \"\"\"\n Bases: :class:`PytestWarning`.\n\n Warning emitted on use of unknown markers.\n See https://docs.pytest.org/en/latest/mark.html for details.\n \"\"\"\n\n\nclass RemovedInPytest4Warning(PytestDeprecationWarning):\n \"\"\"\n Bases: :class:`pytest.PytestDeprecationWarning`.\n\n Warning class for features scheduled to be removed in pytest 4.0.\n \"\"\"\n\n\n@attr.s\nclass UnformattedWarning(object):\n \"\"\"Used to hold warnings that need to format their message at runtime, as opposed to a direct message.\n\n Using this class avoids to keep all the warning types and messages in this module, avoiding misuse.\n \"\"\"\n\n category = attr.ib()\n template = attr.ib()\n\n def format(self, **kwargs):\n \"\"\"Returns an instance of the warning category, formatted with given kwargs\"\"\"\n return self.category(self.template.format(**kwargs))\n\n\nPYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple(\"testdir.copy_example\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_from___future___import_ab__setoption.wmod_filterwarnings_actio": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_from___future___import_ab__setoption.wmod_filterwarnings_actio", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 36, "span_ids": ["_setoption", "imports", "impl"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport warnings\nfrom contextlib import contextmanager\n\nimport pytest\nfrom _pytest import compat\n\nSHOW_PYTEST_WARNINGS_ARG = \"-Walways::pytest.RemovedInPytest4Warning\"\n\n\ndef _setoption(wmod, arg):\n \"\"\"\n Copy of the warning._setoption function but does not escape arguments.\n \"\"\"\n parts = arg.split(\":\")\n if len(parts) > 5:\n raise wmod._OptionError(\"too many fields (max 5): %r\" % (arg,))\n while len(parts) < 5:\n parts.append(\"\")\n action, message, category, module, lineno = [s.strip() for s in parts]\n action = wmod._getaction(action)\n category = wmod._getcategory(category)\n if lineno:\n try:\n lineno = int(lineno)\n if lineno < 0:\n raise ValueError\n except (ValueError, OverflowError):\n raise wmod._OptionError(\"invalid lineno %r\" % (lineno,))\n else:\n lineno = 0\n wmod.filterwarnings(action, message, category, module, lineno)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_addoption_pytest_configure.config_addinivalue_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_addoption_pytest_configure.config_addinivalue_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 39, "end_line": 61, "span_ids": ["pytest_addoption", "pytest_configure"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n group = parser.getgroup(\"pytest-warnings\")\n group.addoption(\n \"-W\",\n \"--pythonwarnings\",\n action=\"append\",\n help=\"set which warnings to report, see -W option of python itself.\",\n )\n parser.addini(\n \"filterwarnings\",\n type=\"linelist\",\n help=\"Each line specifies a pattern for \"\n \"warnings.filterwarnings. \"\n \"Processed after -W and --pythonwarnings.\",\n )\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\n \"markers\",\n \"filterwarnings(warning): add a warning filter to the given test. 
\"\n \"see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings \",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_catch_warnings_for_item_catch_warnings_for_item.with_warnings_catch_warni.for_warning_message_in_lo.ihook_pytest_warning_capt": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_catch_warnings_for_item_catch_warnings_for_item.with_warnings_catch_warni.for_warning_message_in_lo.ihook_pytest_warning_capt", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 64, "end_line": 102, "span_ids": ["catch_warnings_for_item"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextmanager\ndef catch_warnings_for_item(config, ihook, when, item):\n \"\"\"\n Context manager that catches warnings generated in the contained execution block.\n\n ``item`` can be None if we are not in the context of an item execution.\n\n Each warning captured triggers the ``pytest_warning_captured`` hook.\n \"\"\"\n cmdline_filters = config.getoption(\"pythonwarnings\") or []\n inifilters = config.getini(\"filterwarnings\")\n with warnings.catch_warnings(record=True) as log:\n\n if not sys.warnoptions:\n # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908)\n warnings.filterwarnings(\"always\", category=DeprecationWarning)\n warnings.filterwarnings(\"always\", category=PendingDeprecationWarning)\n\n warnings.filterwarnings(\"error\", category=pytest.RemovedInPytest4Warning)\n\n # filters should have this precedence: mark, cmdline options, ini\n # filters should be applied in the inverse order of precedence\n for arg in inifilters:\n _setoption(warnings, arg)\n\n for arg in cmdline_filters:\n warnings._setoption(arg)\n\n if item is not None:\n for mark in item.iter_markers(name=\"filterwarnings\"):\n for arg in mark.args:\n _setoption(warnings, arg)\n\n yield\n\n for warning_message in log:\n ihook.pytest_warning_captured.call_historic(\n kwargs=dict(warning_message=warning_message, when=when, item=item)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_warning_record_to_str_warning_record_to_str.return.msg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_warning_record_to_str_warning_record_to_str.return.msg", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 105, "end_line": 135, "span_ids": ["warning_record_to_str"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def warning_record_to_str(warning_message):\n \"\"\"Convert a warnings.WarningMessage to a string.\n\n This takes lot of unicode shenaningans into account for Python 2.\n When Python 2 support is dropped this function can be greatly simplified.\n \"\"\"\n warn_msg = warning_message.message\n unicode_warning = False\n if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):\n new_args = []\n for m in warn_msg.args:\n new_args.append(\n compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m\n )\n unicode_warning = list(warn_msg.args) != new_args\n warn_msg.args = new_args\n\n msg = warnings.formatwarning(\n warn_msg,\n warning_message.category,\n warning_message.filename,\n warning_message.lineno,\n warning_message.line,\n )\n if unicode_warning:\n warnings.warn(\n \"Warning is using unicode non convertible to ascii, \"\n \"converting to a safe representation:\\n {!r}\".format(compat.safe_str(msg)),\n UnicodeWarning,\n )\n return msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_runtest_protocol_pytest_terminal_summary.with_catch_warnings_for_i.yield": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py_pytest_runtest_protocol_pytest_terminal_summary.with_catch_warnings_for_i.yield", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 138, "end_line": 161, "span_ids": ["pytest_runtest_protocol", "pytest_collection", "pytest_terminal_summary"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.hookimpl(hookwrapper=True, tryfirst=True)\ndef pytest_runtest_protocol(item):\n with catch_warnings_for_item(\n config=item.config, ihook=item.ihook, when=\"runtest\", item=item\n ):\n yield\n\n\n@pytest.hookimpl(hookwrapper=True, tryfirst=True)\ndef pytest_collection(session):\n config = session.config\n with catch_warnings_for_item(\n config=config, ihook=config.hook, when=\"collect\", item=None\n ):\n yield\n\n\n@pytest.hookimpl(hookwrapper=True)\ndef pytest_terminal_summary(terminalreporter):\n config = terminalreporter.config\n with catch_warnings_for_item(\n config=config, ihook=config.hook, when=\"config\", item=None\n ):\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py__issue_warning_captured_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/warnings.py__issue_warning_captured_", "embedding": null, "metadata": {"file_path": "src/_pytest/warnings.py", "file_name": "warnings.py", "file_type": "text/x-python", "category": "implementation", "start_line": 164, "end_line": 180, "span_ids": ["_issue_warning_captured"], "tokens": 187}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def _issue_warning_captured(warning, hook, stacklevel):\n \"\"\"\n This function should be used instead of calling ``warnings.warn`` directly when we are in the \"configure\" stage:\n at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured\n hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891.\n\n :param warning: the warning instance.\n :param hook: the hook caller\n :param stacklevel: stacklevel forwarded to warnings.warn\n \"\"\"\n with warnings.catch_warnings(record=True) as records:\n warnings.simplefilter(\"always\", type(warning))\n warnings.warn(warning, stacklevel=stacklevel)\n hook.pytest_warning_captured.call_historic(\n kwargs=dict(warning_message=records[0], when=\"config\", item=None)\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/pytest.py__PYTHON_ARGCOMPLETE_OK_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/pytest.py__PYTHON_ARGCOMPLETE_OK_", "embedding": null, "metadata": {"file_path": "src/pytest.py", "file_name": "pytest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 107, "span_ids": ["impl", "imports:44", "docstring", "impl:8", "impl:6", "imports", "imports:43"], "tokens": 723}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# PYTHON_ARGCOMPLETE_OK\n\"\"\"\npytest: unit and functional testing with Python.\n\"\"\"\n# else we are imported\nfrom _pytest import __version__\nfrom _pytest.assertion import register_assert_rewrite\nfrom _pytest.config import cmdline\nfrom _pytest.config import hookimpl\nfrom _pytest.config import hookspec\nfrom _pytest.config import main\nfrom _pytest.config import UsageError\nfrom _pytest.debugging import pytestPDB as __pytestPDB\nfrom _pytest.fixtures import fillfixtures as _fillfuncargs\nfrom _pytest.fixtures import fixture\nfrom _pytest.fixtures import yield_fixture\nfrom _pytest.freeze_support import freeze_includes\nfrom _pytest.main import Session\nfrom _pytest.mark import MARK_GEN as mark\nfrom _pytest.mark import param\nfrom _pytest.nodes import Collector\nfrom _pytest.nodes import File\nfrom _pytest.nodes import Item\nfrom _pytest.outcomes import exit\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import importorskip\nfrom _pytest.outcomes import skip\nfrom _pytest.outcomes import xfail\nfrom _pytest.python import Class\nfrom _pytest.python import Function\nfrom _pytest.python import Instance\nfrom _pytest.python import Module\nfrom _pytest.python import Package\nfrom _pytest.python_api import approx\nfrom _pytest.python_api import raises\nfrom _pytest.recwarn import deprecated_call\nfrom _pytest.recwarn import warns\nfrom _pytest.warning_types import 
PytestAssertRewriteWarning\nfrom _pytest.warning_types import PytestCacheWarning\nfrom _pytest.warning_types import PytestCollectionWarning\nfrom _pytest.warning_types import PytestConfigWarning\nfrom _pytest.warning_types import PytestDeprecationWarning\nfrom _pytest.warning_types import PytestExperimentalApiWarning\nfrom _pytest.warning_types import PytestUnhandledCoroutineWarning\nfrom _pytest.warning_types import PytestUnknownMarkWarning\nfrom _pytest.warning_types import PytestWarning\nfrom _pytest.warning_types import RemovedInPytest4Warning\n\nset_trace = __pytestPDB.set_trace\n\n__all__ = [\n \"__version__\",\n \"_fillfuncargs\",\n \"approx\",\n \"Class\",\n \"cmdline\",\n \"Collector\",\n \"deprecated_call\",\n \"exit\",\n \"fail\",\n \"File\",\n \"fixture\",\n \"freeze_includes\",\n \"Function\",\n \"hookimpl\",\n \"hookspec\",\n \"importorskip\",\n \"Instance\",\n \"Item\",\n \"main\",\n \"mark\",\n \"Module\",\n \"Package\",\n \"param\",\n \"PytestAssertRewriteWarning\",\n \"PytestCacheWarning\",\n \"PytestCollectionWarning\",\n \"PytestConfigWarning\",\n \"PytestDeprecationWarning\",\n \"PytestExperimentalApiWarning\",\n \"PytestUnhandledCoroutineWarning\",\n \"PytestUnknownMarkWarning\",\n \"PytestWarning\",\n \"raises\",\n \"register_assert_rewrite\",\n \"RemovedInPytest4Warning\",\n \"Session\",\n \"set_trace\",\n \"skip\",\n \"UsageError\",\n \"warns\",\n \"xfail\",\n \"yield_fixture\",\n]\n\nif __name__ == \"__main__\":\n # if run as a script or by 'python -m pytest'\n # we trigger the below \"else\" condition by the following import\n import pytest\n\n raise SystemExit(pytest.main())\nelse:\n\n from _pytest.compat import _setup_collect_fakemodule\n\n _setup_collect_fakemodule()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py__coding_utf_8__prepend_pythonpath.return.os_pathsep_join_str_p_fo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py__coding_utf_8__prepend_pythonpath.return.os_pathsep_join_str_p_fo", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 25, "span_ids": ["prepend_pythonpath", "imports", "docstring"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport textwrap\nimport types\n\nimport attr\nimport py\nimport six\n\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import EXIT_USAGEERROR\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\n\ndef prepend_pythonpath(*dirs):\n cur = os.getenv(\"PYTHONPATH\")\n if cur:\n dirs += (cur,)\n return os.pathsep.join(str(p) for p in dirs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage_TestGeneralUsage.test_root_conftest_syntax_error.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage_TestGeneralUsage.test_root_conftest_syntax_error.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 28, "end_line": 40, "span_ids": ["TestGeneralUsage.test_config_error", "TestGeneralUsage.test_root_conftest_syntax_error", "TestGeneralUsage"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n def test_config_error(self, testdir):\n testdir.copy_example(\"conftest_usageerror/conftest.py\")\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret == EXIT_USAGEERROR\n result.stderr.fnmatch_lines([\"*ERROR: hello\"])\n result.stdout.fnmatch_lines([\"*pytest_unconfigure_called\"])\n\n def test_root_conftest_syntax_error(self, testdir):\n testdir.makepyfile(conftest=\"raise SyntaxError\\n\")\n result = testdir.runpytest()\n result.stderr.fnmatch_lines([\"*raise SyntaxError*\"])\n assert result.ret != 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_error_issue38_1_TestGeneralUsage.test_early_hook_error_issue38_1.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_error_issue38_1_TestGeneralUsage.test_early_hook_error_issue38_1.None_2", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 60, "span_ids": ["TestGeneralUsage.test_early_hook_error_issue38_1"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_early_hook_error_issue38_1(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_sessionstart():\n 0 / 0\n \"\"\"\n )\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret != 0\n # tracestyle is native by default for hook failures\n result.stdout.fnmatch_lines(\n [\"*INTERNALERROR*File*conftest.py*line 2*\", \"*0 / 0*\"]\n )\n result = testdir.runpytest(testdir.tmpdir, \"--fulltrace\")\n assert result.ret != 0\n # tracestyle is native by default for hook failures\n result.stdout.fnmatch_lines(\n [\"*INTERNALERROR*def pytest_sessionstart():*\", \"*INTERNALERROR*0 / 0*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_configure_error_issue38_TestGeneralUsage.test_file_not_found_unconfigure_issue143.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_hook_configure_error_issue38_TestGeneralUsage.test_file_not_found_unconfigure_issue143.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 93, "span_ids": ["TestGeneralUsage.test_file_not_found_unconfigure_issue143", "TestGeneralUsage.test_file_not_found", "TestGeneralUsage.test_early_hook_configure_error_issue38"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_early_hook_configure_error_issue38(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_configure():\n 0 / 0\n \"\"\"\n )\n result = testdir.runpytest(testdir.tmpdir)\n assert result.ret != 0\n # here we get it on stderr\n result.stderr.fnmatch_lines(\n [\"*INTERNALERROR*File*conftest.py*line 2*\", \"*0 / 0*\"]\n )\n\n def test_file_not_found(self, testdir):\n result = testdir.runpytest(\"asd\")\n assert result.ret != 0\n result.stderr.fnmatch_lines([\"ERROR: file not found*asd\"])\n\n def test_file_not_found_unconfigure_issue143(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_configure():\n print(\"---configure\")\n def pytest_unconfigure():\n print(\"---unconfigure\")\n \"\"\"\n )\n result = testdir.runpytest(\"-s\", \"asd\")\n assert result.ret == 4 # EXIT_USAGEERROR\n result.stderr.fnmatch_lines([\"ERROR: file not found*asd\"])\n result.stdout.fnmatch_lines([\"*---configure\", \"*---unconfigure\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_config_preparse_plugin_option_TestGeneralUsage.test_config_preparse_plugin_option.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_config_preparse_plugin_option_TestGeneralUsage.test_config_preparse_plugin_option.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 95, "end_line": 110, "span_ids": ["TestGeneralUsage.test_config_preparse_plugin_option"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_config_preparse_plugin_option(self, testdir):\n testdir.makepyfile(\n pytest_xyz=\"\"\"\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", 
dest=\"xyz\", action=\"store\")\n \"\"\"\n )\n testdir.makepyfile(\n test_one=\"\"\"\n def test_option(pytestconfig):\n assert pytestconfig.option.xyz == \"123\"\n \"\"\"\n )\n result = testdir.runpytest(\"-p\", \"pytest_xyz\", \"--xyz=123\", syspathinsert=True)\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_load_setuptools_name_TestGeneralUsage.test_early_load_setuptools_name.if_load_cov_early_.else_.assert_loaded_myplug": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_load_setuptools_name_TestGeneralUsage.test_early_load_setuptools_name.if_load_cov_early_.else_.assert_loaded_myplug", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 164, "span_ids": ["TestGeneralUsage.test_early_load_setuptools_name.DummyEntryPoint", "TestGeneralUsage.test_early_load_setuptools_name.DummyEntryPoint:2", "TestGeneralUsage.test_early_load_setuptools_name"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n @pytest.mark.parametrize(\"load_cov_early\", [True, False])\n def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):\n pkg_resources = pytest.importorskip(\"pkg_resources\")\n\n testdir.makepyfile(mytestplugin1_module=\"\")\n testdir.makepyfile(mytestplugin2_module=\"\")\n testdir.makepyfile(mycov_module=\"\")\n testdir.syspathinsert()\n\n loaded = []\n\n @attr.s\n class DummyEntryPoint(object):\n name = attr.ib()\n module = attr.ib()\n version = \"1.0\"\n\n @property\n def project_name(self):\n return self.name\n\n def load(self):\n __import__(self.module)\n loaded.append(self.name)\n return sys.modules[self.module]\n\n @property\n def dist(self):\n return self\n\n def _get_metadata(self, *args):\n return []\n\n entry_points = [\n DummyEntryPoint(\"myplugin1\", \"mytestplugin1_module\"),\n DummyEntryPoint(\"myplugin2\", \"mytestplugin2_module\"),\n DummyEntryPoint(\"mycov\", \"mycov_module\"),\n ]\n\n def my_iter(group, name=None):\n assert group == \"pytest11\"\n for ep in entry_points:\n if name is not None and ep.name != name:\n continue\n yield ep\n\n monkeypatch.setattr(pkg_resources, \"iter_entry_points\", my_iter)\n params = (\"-p\", \"mycov\") if load_cov_early else ()\n testdir.runpytest_inprocess(*params)\n if load_cov_early:\n assert loaded == [\"mycov\", \"myplugin1\", \"myplugin2\"]\n else:\n assert loaded == [\"myplugin1\", \"myplugin2\", \"mycov\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_assertion_magic_TestGeneralUsage.test_not_collectable_arguments.result_stderr_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_assertion_magic_TestGeneralUsage.test_not_collectable_arguments.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 202, "span_ids": ["TestGeneralUsage.test_nested_import_error", "TestGeneralUsage.test_assertion_magic", "TestGeneralUsage.test_not_collectable_arguments"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_assertion_magic(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_this():\n x = 0\n assert x\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"> assert x\", \"E assert 0\"])\n assert result.ret == 1\n\n def test_nested_import_error(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import import_fails\n def test_this():\n assert import_fails.a == 1\n \"\"\"\n )\n testdir.makepyfile(import_fails=\"import does_not_work\")\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n # XXX on jython this fails: \"> import import_fails\",\n \"ImportError while importing test module*\",\n \"*No module named *does_not_work*\",\n ]\n )\n assert result.ret == 2\n\n def test_not_collectable_arguments(self, testdir):\n p1 = testdir.makepyfile(\"\")\n p2 = testdir.makefile(\".pyc\", \"123\")\n result = testdir.runpytest(p1, p2)\n assert result.ret\n result.stderr.fnmatch_lines([\"*ERROR: not found:*{}\".format(p2.basename)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_better_reporting_on_conftest_load_failure_TestGeneralUsage.test_better_reporting_on_conftest_load_failure.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_better_reporting_on_conftest_load_failure_TestGeneralUsage.test_better_reporting_on_conftest_load_failure.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 204, "end_line": 240, "span_ids": ["TestGeneralUsage.test_better_reporting_on_conftest_load_failure"], "tokens": 292}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n @pytest.mark.filterwarnings(\"default\")\n def test_better_reporting_on_conftest_load_failure(self, testdir, request):\n \"\"\"Show a user-friendly traceback on conftest import failures (#486, #3332)\"\"\"\n testdir.makepyfile(\"\")\n testdir.makeconftest(\n \"\"\"\n def foo():\n import qwerty\n foo()\n \"\"\"\n )\n result = 
testdir.runpytest(\"--help\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *--version*\n *warning*conftest.py*\n \"\"\"\n )\n result = testdir.runpytest()\n dirname = request.node.name + \"0\"\n exc_name = (\n \"ModuleNotFoundError\" if sys.version_info >= (3, 6) else \"ImportError\"\n )\n result.stderr.fnmatch_lines(\n [\n \"ImportError while loading conftest '*{sep}{dirname}{sep}conftest.py'.\".format(\n dirname=dirname, sep=os.sep\n ),\n \"conftest.py:3: in \",\n \" foo()\",\n \"conftest.py:2: in foo\",\n \" import qwerty\",\n \"E {}: No module named {q}qwerty{q}\".format(\n exc_name, q=\"'\" if six.PY3 else \"\"\n ),\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_skip_TestGeneralUsage.test_conftest_printing_shows_if_error.assert_should_be_seen_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_early_skip_TestGeneralUsage.test_conftest_printing_shows_if_error.assert_should_be_seen_i", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 283, "span_ids": ["TestGeneralUsage.test_issue88_initial_file_multinodes", "TestGeneralUsage.test_conftest_printing_shows_if_error", "TestGeneralUsage.test_early_skip", "TestGeneralUsage.test_issue93_initialnode_importing_capturing"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_early_skip(self, testdir):\n testdir.mkdir(\"xyz\")\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_collect_directory():\n pytest.skip(\"early\")\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\"*1 skip*\"])\n\n def test_issue88_initial_file_multinodes(self, testdir):\n testdir.copy_example(\"issue88_initial_file_multinodes\")\n p = testdir.makepyfile(\"def test_hello(): pass\")\n result = testdir.runpytest(p, \"--collect-only\")\n result.stdout.fnmatch_lines([\"*MyFile*test_issue88*\", \"*Module*test_issue88*\"])\n\n def test_issue93_initialnode_importing_capturing(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import sys\n print(\"should not be seen\")\n sys.stderr.write(\"stder42\\\\n\")\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n assert \"should not be seen\" not in result.stdout.str()\n assert \"stderr42\" not in result.stderr.str()\n\n def test_conftest_printing_shows_if_error(self, testdir):\n testdir.makeconftest(\n \"\"\"\n print(\"should be seen\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n assert \"should be seen\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_chdir_TestGeneralUsage.test_chdir.assert_not_result_ret": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_chdir_TestGeneralUsage.test_chdir.assert_not_result_ret", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 285, "end_line": 306, "span_ids": ["TestGeneralUsage.test_chdir"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n @pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n )\n def test_chdir(self, testdir):\n testdir.tmpdir.join(\"py\").mksymlinkto(py._pydir)\n p = testdir.tmpdir.join(\"main.py\")\n p.write(\n textwrap.dedent(\n \"\"\"\\\n import sys, os\n sys.path.insert(0, '')\n import py\n print(py.__file__)\n print(py.__path__)\n os.chdir(os.path.dirname(os.getcwd()))\n print(py.log)\n \"\"\"\n )\n )\n result = testdir.runpython(p)\n assert not result.ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue109_sibling_conftests_not_loaded_TestGeneralUsage.test_directory_skipped.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue109_sibling_conftests_not_loaded_TestGeneralUsage.test_directory_skipped.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 332, "span_ids": ["TestGeneralUsage.test_directory_skipped", "TestGeneralUsage.test_issue109_sibling_conftests_not_loaded"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_issue109_sibling_conftests_not_loaded(self, testdir):\n sub1 = testdir.mkdir(\"sub1\")\n sub2 = testdir.mkdir(\"sub2\")\n sub1.join(\"conftest.py\").write(\"assert 0\")\n result = testdir.runpytest(sub2)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n sub2.ensure(\"__init__.py\")\n p = sub2.ensure(\"test_hello.py\")\n result = testdir.runpytest(p)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result = testdir.runpytest(sub1)\n assert result.ret == EXIT_USAGEERROR\n\n def test_directory_skipped(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_ignore_collect():\n pytest.skip(\"intentional\")\n \"\"\"\n )\n testdir.makepyfile(\"def test_hello(): pass\")\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n 
result.stdout.fnmatch_lines([\"*1 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_multiple_items_per_collector_byid_TestGeneralUsage.test_multiple_items_per_collector_byid.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_multiple_items_per_collector_byid_TestGeneralUsage.test_multiple_items_per_collector_byid.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 334, "end_line": 351, "span_ids": ["TestGeneralUsage.test_multiple_items_per_collector_byid"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_multiple_items_per_collector_byid(self, testdir):\n c = testdir.makeconftest(\n \"\"\"\n import pytest\n class MyItem(pytest.Item):\n def runtest(self):\n pass\n class MyCollector(pytest.File):\n def collect(self):\n return [MyItem(name=\"xyz\", parent=self)]\n def pytest_collect_file(path, parent):\n if path.basename.startswith(\"conftest\"):\n return MyCollector(path, parent)\n \"\"\"\n )\n result = testdir.runpytest(c.basename + \"::\" + \"xyz\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 pass*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_skip_on_generated_funcarg_id_TestGeneralUsage.test_skip_on_generated_funcarg_id.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_skip_on_generated_funcarg_id_TestGeneralUsage.test_skip_on_generated_funcarg_id.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 353, "end_line": 369, "span_ids": ["TestGeneralUsage.test_skip_on_generated_funcarg_id"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_skip_on_generated_funcarg_id(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('x', [3], ids=['hello-123'])\n def pytest_runtest_setup(item):\n print(item.keywords)\n if 'hello-123' in item.keywords:\n pytest.skip(\"hello\")\n assert 0\n \"\"\"\n )\n p = testdir.makepyfile(\"\"\"def test_func(x): pass\"\"\")\n res = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)\n 
assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_selects_TestGeneralUsage.test_direct_addressing_selects.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_selects_TestGeneralUsage.test_direct_addressing_selects.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 371, "end_line": 384, "span_ids": ["TestGeneralUsage.test_direct_addressing_selects"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_direct_addressing_selects(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('i', [1, 2], ids=[\"1\", \"2\"])\n def test_func(i):\n pass\n \"\"\"\n )\n res = testdir.runpytest(\n p.basename + \"::\" + \"test_func[1]\", SHOW_PYTEST_WARNINGS_ARG\n )\n assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_notfound_TestGeneralUsage.test_initialization_error_issue49.assert_sessionstarttime_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_direct_addressing_notfound_TestGeneralUsage.test_initialization_error_issue49.assert_sessionstarttime_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 386, "end_line": 414, "span_ids": ["TestGeneralUsage.test_initialization_error_issue49", "TestGeneralUsage.test_direct_addressing_notfound", "TestGeneralUsage.test_docstring_on_hookspec"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_direct_addressing_notfound(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n res = testdir.runpytest(p.basename + \"::\" + \"test_notfound\")\n assert res.ret\n res.stderr.fnmatch_lines([\"*ERROR*not found*\"])\n\n def test_docstring_on_hookspec(self):\n from _pytest import hookspec\n\n for name, value in vars(hookspec).items():\n if name.startswith(\"pytest_\"):\n assert value.__doc__, \"no docstring for %s\" % name\n\n def test_initialization_error_issue49(self, 
testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_configure():\n x\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 3 # internal error\n result.stderr.fnmatch_lines([\"INTERNAL*pytest_configure*\", \"INTERNAL*x*\"])\n assert \"sessionstarttime\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue134_report_error_when_collecting_member_TestGeneralUsage.test_issue134_report_error_when_collecting_member.if_in_lookfor_._usage_error_only_if_ite": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_issue134_report_error_when_collecting_member_TestGeneralUsage.test_issue134_report_error_when_collecting_member.if_in_lookfor_._usage_error_only_if_ite", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 416, "end_line": 428, "span_ids": ["TestGeneralUsage.test_issue134_report_error_when_collecting_member"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n @pytest.mark.parametrize(\"lookfor\", [\"test_fun.py::test_a\"])\n def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):\n testdir.makepyfile(\n test_fun=\"\"\"\n def test_a():\n pass\n def\"\"\"\n )\n result = testdir.runpytest(lookfor)\n result.stdout.fnmatch_lines([\"*SyntaxError*\"])\n if \"::\" in lookfor:\n result.stderr.fnmatch_lines([\"*ERROR*\"])\n assert result.ret == 4 # usage error only if item not found", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_report_all_failed_collections_initargs_TestGeneralUsage.test_report_all_failed_collections_initargs.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_report_all_failed_collections_initargs_TestGeneralUsage.test_report_all_failed_collections_initargs.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 430, "end_line": 444, "span_ids": ["TestGeneralUsage.test_report_all_failed_collections_initargs"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_report_all_failed_collections_initargs(self, testdir):\n testdir.makeconftest(\n \"\"\"\n from _pytest.main import EXIT_USAGEERROR\n\n def 
pytest_sessionfinish(exitstatus):\n assert exitstatus == EXIT_USAGEERROR\n print(\"pytest_sessionfinish_called\")\n \"\"\"\n )\n testdir.makepyfile(test_a=\"def\", test_b=\"def\")\n result = testdir.runpytest(\"test_a.py::a\", \"test_b.py::b\")\n result.stderr.fnmatch_lines([\"*ERROR*test_a.py::a*\", \"*ERROR*test_b.py::b*\"])\n result.stdout.fnmatch_lines([\"pytest_sessionfinish_called\"])\n assert result.ret == EXIT_USAGEERROR", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook.assert_res_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook_TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook.assert_res_ret_0", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 446, "end_line": 470, "span_ids": ["TestGeneralUsage.test_namespace_import_doesnt_confuse_import_hook"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n @pytest.mark.usefixtures(\"recwarn\")\n def test_namespace_import_doesnt_confuse_import_hook(self, testdir):\n \"\"\"\n Ref #383. 
Python 3.3's namespace package messed with our import hooks\n Importing a module that didn't exist, even if the ImportError was\n gracefully handled, would make our test crash.\n\n Use recwarn here to silence this warning in Python 2.7:\n ImportWarning: Not importing directory '...\\not_a_package': missing __init__.py\n \"\"\"\n testdir.mkdir(\"not_a_package\")\n p = testdir.makepyfile(\n \"\"\"\n try:\n from not_a_package import doesnt_exist\n except ImportError:\n # We handle the import error gracefully here\n pass\n\n def test_whatever():\n pass\n \"\"\"\n )\n res = testdir.runpytest(p.basename)\n assert res.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_unknown_option_TestGeneralUsage.test_getsourcelines_error_issue553.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_unknown_option_TestGeneralUsage.test_getsourcelines_error_issue553.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 472, "end_line": 497, "span_ids": ["TestGeneralUsage.test_unknown_option", "TestGeneralUsage.test_getsourcelines_error_issue553"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_unknown_option(self, testdir):\n result = testdir.runpytest(\"--qwlkej\")\n result.stderr.fnmatch_lines(\n \"\"\"\n *unrecognized*\n \"\"\"\n )\n\n def test_getsourcelines_error_issue553(self, testdir, monkeypatch):\n monkeypatch.setattr(\"inspect.getsourcelines\", None)\n p = testdir.makepyfile(\n \"\"\"\n def raise_error(obj):\n raise IOError('source code not available')\n\n import inspect\n inspect.getsourcelines = raise_error\n\n def test_foo(invalid_fixture):\n pass\n \"\"\"\n )\n res = testdir.runpytest(p)\n res.stdout.fnmatch_lines(\n [\"*source code not available*\", \"E*fixture 'invalid_fixture' not found\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_plugins_given_as_strings_TestGeneralUsage.test_plugins_given_as_strings.assert_pytest_main_args_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_plugins_given_as_strings_TestGeneralUsage.test_plugins_given_as_strings.assert_pytest_main_args_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 499, "end_line": 512, "span_ids": ["TestGeneralUsage.test_plugins_given_as_strings"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):\n \"\"\"test that str values passed to main() as `plugins` arg\n are interpreted as module names to be imported and registered.\n #855.\n \"\"\"\n with pytest.raises(ImportError) as excinfo:\n pytest.main([str(tmpdir)], plugins=[\"invalid.module\"])\n assert \"invalid\" in str(excinfo.value)\n\n p = tmpdir.join(\"test_test_plugins_given_as_strings.py\")\n p.write(\"def test_foo(): pass\")\n mod = types.ModuleType(\"myplugin\")\n monkeypatch.setitem(sys.modules, \"myplugin\", mod)\n assert pytest.main(args=[str(tmpdir)], plugins=[\"myplugin\"]) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_parametrized_with_bytes_regex_TestGeneralUsage.test_parametrized_with_null_bytes.res_assert_outcomes_passe": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestGeneralUsage.test_parametrized_with_bytes_regex_TestGeneralUsage.test_parametrized_with_null_bytes.res_assert_outcomes_passe", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 514, "end_line": 540, "span_ids": ["TestGeneralUsage.test_parametrized_with_bytes_regex", "TestGeneralUsage.test_parametrized_with_null_bytes"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGeneralUsage(object):\n\n def test_parametrized_with_bytes_regex(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import re\n import pytest\n @pytest.mark.parametrize('r', [re.compile(b'foo')])\n def test_stuff(r):\n pass\n \"\"\"\n )\n res = testdir.runpytest(p)\n res.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_parametrized_with_null_bytes(self, testdir):\n \"\"\"Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)\"\"\"\n p = testdir.makepyfile(\n u\"\"\"\n # encoding: UTF-8\n import pytest\n\n @pytest.mark.parametrize(\"data\", [b\"\\\\x00\", \"\\\\x00\", u'a\u00e7\u00e3o'])\n def test_foo(data):\n assert data\n \"\"\"\n )\n res = testdir.runpytest(p)\n res.assert_outcomes(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants_TestInvocationVariants.test_invoke_plugin_api.assert_myopt_in_out": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants_TestInvocationVariants.test_invoke_plugin_api.assert_myopt_in_out", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 
543, "end_line": 647, "span_ids": ["TestInvocationVariants.test_python_pytest_package", "TestInvocationVariants.test_equivalence_pytest_pytest", "TestInvocationVariants.test_double_pytestcmdline", "TestInvocationVariants.test_python_minus_m_invocation_fail", "TestInvocationVariants.test_invoke_plugin_api", "TestInvocationVariants.test_import_star_pytest", "TestInvocationVariants.test_import_star_py_dot_test", "TestInvocationVariants.test_earlyinit", "TestInvocationVariants.test_invoke_with_invalid_type", "TestInvocationVariants.test_invoke_with_path", "TestInvocationVariants", "TestInvocationVariants.test_python_minus_m_invocation_ok", "TestInvocationVariants.test_invoke_plugin_api.MyPlugin", "TestInvocationVariants.test_invoke_plugin_api.MyPlugin.pytest_addoption", "TestInvocationVariants.test_pydoc"], "tokens": 757}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n def test_earlyinit(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n assert hasattr(pytest, 'mark')\n \"\"\"\n )\n result = testdir.runpython(p)\n assert result.ret == 0\n\n @pytest.mark.xfail(\"sys.platform.startswith('java')\")\n def test_pydoc(self, testdir):\n for name in (\"py.test\", \"pytest\"):\n result = testdir.runpython_c(\"import {};help({})\".format(name, name))\n assert result.ret == 0\n s = result.stdout.str()\n assert \"MarkGenerator\" in s\n\n def test_import_star_py_dot_test(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n from py.test import *\n #collect\n #cmdline\n #Item\n # assert collect.Item is Item\n # assert collect.Collector is Collector\n main\n skip\n xfail\n \"\"\"\n )\n result = testdir.runpython(p)\n assert result.ret == 0\n\n def test_import_star_pytest(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n from pytest import *\n #Item\n #File\n main\n skip\n xfail\n \"\"\"\n )\n result = testdir.runpython(p)\n assert result.ret == 0\n\n def test_double_pytestcmdline(self, testdir):\n p = testdir.makepyfile(\n run=\"\"\"\n import pytest\n pytest.main()\n pytest.main()\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n pass\n \"\"\"\n )\n result = testdir.runpython(p)\n result.stdout.fnmatch_lines([\"*1 passed*\", \"*1 passed*\"])\n\n def test_python_minus_m_invocation_ok(self, testdir):\n p1 = testdir.makepyfile(\"def test_hello(): pass\")\n res = testdir.run(sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 0\n\n def test_python_minus_m_invocation_fail(self, testdir):\n p1 = testdir.makepyfile(\"def test_fail(): 0/0\")\n res = testdir.run(sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 1\n\n def test_python_pytest_package(self, testdir):\n p1 = testdir.makepyfile(\"def test_pass(): pass\")\n res = testdir.run(sys.executable, \"-m\", \"pytest\", str(p1))\n assert res.ret == 0\n res.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_equivalence_pytest_pytest(self):\n assert pytest.main == py.test.cmdline.main\n\n def test_invoke_with_invalid_type(self, capsys):\n with pytest.raises(\n TypeError, match=\"expected to be a list or tuple of strings, got: '-h'\"\n ):\n pytest.main(\"-h\")\n\n def test_invoke_with_path(self, tmpdir, capsys):\n retcode = pytest.main(tmpdir)\n assert retcode == EXIT_NOTESTSCOLLECTED\n 
out, err = capsys.readouterr()\n\n def test_invoke_plugin_api(self, testdir, capsys):\n class MyPlugin(object):\n def pytest_addoption(self, parser):\n parser.addoption(\"--myopt\")\n\n pytest.main([\"-h\"], plugins=[MyPlugin()])\n out, err = capsys.readouterr()\n assert \"--myopt\" in out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_pyargs_importerror_TestInvocationVariants.test_cmdline_python_package.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_pyargs_importerror_TestInvocationVariants.test_cmdline_python_package.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 649, "end_line": 686, "span_ids": ["TestInvocationVariants.test_cmdline_python_package", "TestInvocationVariants.test_pyargs_importerror"], "tokens": 445}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n\n def test_pyargs_importerror(self, testdir, monkeypatch):\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", False)\n path = testdir.mkpydir(\"tpkg\")\n path.join(\"test_hello.py\").write(\"raise ImportError\")\n\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_hello\", syspathinsert=True)\n assert result.ret != 0\n\n result.stdout.fnmatch_lines([\"collected*0*items*/*1*errors\"])\n\n def test_cmdline_python_package(self, testdir, monkeypatch):\n import warnings\n\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", False)\n path = testdir.mkpydir(\"tpkg\")\n path.join(\"test_hello.py\").write(\"def test_hello(): pass\")\n path.join(\"test_world.py\").write(\"def test_world(): pass\")\n result = testdir.runpytest(\"--pyargs\", \"tpkg\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*2 passed*\"])\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_hello\", syspathinsert=True)\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n empty_package = testdir.mkpydir(\"empty_package\")\n monkeypatch.setenv(\"PYTHONPATH\", str(empty_package), prepend=os.pathsep)\n # the path which is not a package raises a warning on pypy;\n # no idea why only pypy and not normal python warn about it here\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", ImportWarning)\n result = testdir.runpytest(\"--pyargs\", \".\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*2 passed*\"])\n\n monkeypatch.setenv(\"PYTHONPATH\", str(testdir), prepend=os.pathsep)\n result = testdir.runpytest(\"--pyargs\", \"tpkg.test_missing\", syspathinsert=True)\n assert result.ret != 0\n result.stderr.fnmatch_lines([\"*not*found*test_missing*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_namespace_package_TestInvocationVariants.test_invoke_test_and_doctestmodules.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_namespace_package_TestInvocationVariants.test_invoke_test_and_doctestmodules.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 688, "end_line": 762, "span_ids": ["TestInvocationVariants.test_cmdline_python_namespace_package", "TestInvocationVariants.test_invoke_test_and_doctestmodules"], "tokens": 627}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n\n def test_cmdline_python_namespace_package(self, testdir, monkeypatch):\n \"\"\"\n test --pyargs option with namespace packages (#1567)\n\n Ref: https://packaging.python.org/guides/packaging-namespace-packages/\n \"\"\"\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", raising=False)\n\n search_path = []\n for dirname in \"hello\", \"world\":\n d = testdir.mkdir(dirname)\n search_path.append(d)\n ns = d.mkdir(\"ns_pkg\")\n ns.join(\"__init__.py\").write(\n \"__import__('pkg_resources').declare_namespace(__name__)\"\n )\n lib = ns.mkdir(dirname)\n lib.ensure(\"__init__.py\")\n lib.join(\"test_{}.py\".format(dirname)).write(\n \"def test_{}(): pass\\ndef test_other():pass\".format(dirname)\n )\n\n # The structure of the test directory is now:\n # .\n # \u251c\u2500\u2500 hello\n # \u2502 \u2514\u2500\u2500 ns_pkg\n # \u2502 \u251c\u2500\u2500 __init__.py\n # \u2502 \u2514\u2500\u2500 hello\n # \u2502 \u251c\u2500\u2500 __init__.py\n # \u2502 \u2514\u2500\u2500 test_hello.py\n # \u2514\u2500\u2500 world\n # \u2514\u2500\u2500 ns_pkg\n # \u251c\u2500\u2500 __init__.py\n # \u2514\u2500\u2500 world\n # \u251c\u2500\u2500 __init__.py\n # \u2514\u2500\u2500 test_world.py\n\n # NOTE: the different/reversed ordering is intentional here.\n monkeypatch.setenv(\"PYTHONPATH\", prepend_pythonpath(*search_path))\n for p in search_path:\n monkeypatch.syspath_prepend(p)\n\n # mixed module and filenames:\n monkeypatch.chdir(\"world\")\n result = testdir.runpytest(\"--pyargs\", \"-v\", \"ns_pkg.hello\", \"ns_pkg/world\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\n \"test_hello.py::test_hello*PASSED*\",\n \"test_hello.py::test_other*PASSED*\",\n \"ns_pkg/world/test_world.py::test_world*PASSED*\",\n \"ns_pkg/world/test_world.py::test_other*PASSED*\",\n \"*4 passed in*\",\n ]\n )\n\n # specify tests within a module\n testdir.chdir()\n result = testdir.runpytest(\n \"--pyargs\", \"-v\", \"ns_pkg.world.test_world::test_other\"\n )\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\"*test_world.py::test_other*PASSED*\", \"*1 passed*\"]\n )\n\n def test_invoke_test_and_doctestmodules(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test():\n pass\n \"\"\"\n )\n result = testdir.runpytest(str(p) + \"::test\", \"--doctest-modules\")\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_symlink_TestInvocationVariants.test_cmdline_python_package_symlink.if_hasattr_py_path_local_.else_.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_symlink_TestInvocationVariants.test_cmdline_python_package_symlink.if_hasattr_py_path_local_.else_.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 764, "end_line": 840, "span_ids": ["TestInvocationVariants.test_cmdline_python_package_symlink"], "tokens": 676}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n\n @pytest.mark.skipif(not hasattr(os, \"symlink\"), reason=\"requires symlinks\")\n def test_cmdline_python_package_symlink(self, testdir, monkeypatch):\n \"\"\"\n test --pyargs option with packages with path containing symlink can\n have conftest.py in their package (#2985)\n \"\"\"\n # dummy check that we can actually create symlinks: on Windows `os.symlink` is available,\n # but normal users require special admin privileges to create symlinks.\n if sys.platform == \"win32\":\n try:\n os.symlink(\n str(testdir.tmpdir.ensure(\"tmpfile\")),\n str(testdir.tmpdir.join(\"tmpfile2\")),\n )\n except OSError as e:\n pytest.skip(six.text_type(e.args[0]))\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", raising=False)\n\n dirname = \"lib\"\n d = testdir.mkdir(dirname)\n foo = d.mkdir(\"foo\")\n foo.ensure(\"__init__.py\")\n lib = foo.mkdir(\"bar\")\n lib.ensure(\"__init__.py\")\n lib.join(\"test_bar.py\").write(\n \"def test_bar(): pass\\ndef test_other(a_fixture):pass\"\n )\n lib.join(\"conftest.py\").write(\n \"import pytest\\n@pytest.fixture\\ndef a_fixture():pass\"\n )\n\n d_local = testdir.mkdir(\"local\")\n symlink_location = os.path.join(str(d_local), \"lib\")\n if six.PY2:\n os.symlink(str(d), symlink_location)\n else:\n os.symlink(str(d), symlink_location, target_is_directory=True)\n\n # The structure of the test directory is now:\n # .\n # \u251c\u2500\u2500 local\n # \u2502 \u2514\u2500\u2500 lib -> ../lib\n # \u2514\u2500\u2500 lib\n # \u2514\u2500\u2500 foo\n # \u251c\u2500\u2500 __init__.py\n # \u2514\u2500\u2500 bar\n # \u251c\u2500\u2500 __init__.py\n # \u251c\u2500\u2500 conftest.py\n # \u2514\u2500\u2500 test_bar.py\n\n # NOTE: the different/reversed ordering is intentional here.\n search_path = [\"lib\", os.path.join(\"local\", \"lib\")]\n monkeypatch.setenv(\"PYTHONPATH\", prepend_pythonpath(*search_path))\n for p in search_path:\n monkeypatch.syspath_prepend(p)\n\n # module picked up in symlink-ed directory:\n # It picks up local/lib/foo/bar (symlink) via sys.path.\n result = testdir.runpytest(\"--pyargs\", \"-v\", \"foo.bar\")\n testdir.chdir()\n assert result.ret == 0\n if hasattr(py.path.local, \"mksymlinkto\"):\n result.stdout.fnmatch_lines(\n [\n \"lib/foo/bar/test_bar.py::test_bar 
PASSED*\",\n \"lib/foo/bar/test_bar.py::test_other PASSED*\",\n \"*2 passed*\",\n ]\n )\n else:\n result.stdout.fnmatch_lines(\n [\n \"*lib/foo/bar/test_bar.py::test_bar PASSED*\",\n \"*lib/foo/bar/test_bar.py::test_other PASSED*\",\n \"*2 passed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_not_exists_TestInvocationVariants.test_noclass_discovery_if_not_testcase.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_cmdline_python_package_not_exists_TestInvocationVariants.test_noclass_discovery_if_not_testcase.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 842, "end_line": 861, "span_ids": ["TestInvocationVariants.test_noclass_discovery_if_not_testcase", "TestInvocationVariants.test_cmdline_python_package_not_exists"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n\n def test_cmdline_python_package_not_exists(self, testdir):\n result = testdir.runpytest(\"--pyargs\", \"tpkgwhatv\")\n assert result.ret\n result.stderr.fnmatch_lines([\"ERROR*file*or*package*not*found*\"])\n\n @pytest.mark.xfail(reason=\"decide: feature or bug\")\n def test_noclass_discovery_if_not_testcase(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class TestHello(object):\n def test_hello(self):\n assert self.attr\n\n class RealTest(unittest.TestCase, TestHello):\n attr = 42\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_doctest_id_TestInvocationVariants.test_has_plugin.assert_request_config_plu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestInvocationVariants.test_doctest_id_TestInvocationVariants.test_has_plugin.assert_request_config_plu", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 863, "end_line": 892, "span_ids": ["TestInvocationVariants.test_core_backward_compatibility", "TestInvocationVariants.test_has_plugin", "TestInvocationVariants.test_doctest_id"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInvocationVariants(object):\n\n def 
test_doctest_id(self, testdir):\n testdir.makefile(\n \".txt\",\n \"\"\"\n >>> x=3\n >>> x\n 4\n \"\"\",\n )\n result = testdir.runpytest(\"-rf\")\n lines = result.stdout.str().splitlines()\n for line in lines:\n if line.startswith((\"FAIL \", \"FAILED \")):\n _fail, _sep, testid = line.partition(\" \")\n break\n result = testdir.runpytest(testid, \"-rf\")\n result.stdout.fnmatch_lines([line, \"*1 failed*\"])\n\n def test_core_backward_compatibility(self):\n \"\"\"Test backward compatibility for get_plugin_manager function. See #787.\"\"\"\n import _pytest.config\n\n assert (\n type(_pytest.config.get_plugin_manager())\n is _pytest.config.PytestPluginManager\n )\n\n def test_has_plugin(self, request):\n \"\"\"Test hasplugin function of the plugin manager (#932).\"\"\"\n assert request.config.pluginmanager.hasplugin(\"python\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations_TestDurations.test_calls_showall.for_x_in_23_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations_TestDurations.test_calls_showall.for_x_in_23_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 897, "end_line": 939, "span_ids": ["TestDurations.test_calls_show_2", "TestDurations.test_calls", "TestDurations.test_calls_showall", "TestDurations"], "tokens": 351}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDurations(object):\n source = \"\"\"\n import time\n frag = 0.002\n def test_something():\n pass\n def test_2():\n time.sleep(frag*5)\n def test_1():\n time.sleep(frag)\n def test_3():\n time.sleep(frag*10)\n \"\"\"\n\n def test_calls(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=10\")\n assert result.ret == 0\n result.stdout.fnmatch_lines_random(\n [\"*durations*\", \"*call*test_3*\", \"*call*test_2*\"]\n )\n result.stdout.fnmatch_lines(\n [\"(0.00 durations hidden. 
Use -vv to show these durations.)\"]\n )\n\n def test_calls_show_2(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=2\")\n assert result.ret == 0\n lines = result.stdout.get_lines_after(\"*slowest*durations*\")\n assert \"4 passed\" in lines[2]\n\n def test_calls_showall(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=0\")\n assert result.ret == 0\n for x in \"23\":\n for y in (\"call\",): # 'setup', 'call', 'teardown':\n for line in result.stdout.lines:\n if (\"test_%s\" % x) in line and y in line:\n break\n else:\n raise AssertionError(\"not found {} {}\".format(x, y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_calls_showall_verbose_TestDurations.test_calls_showall_verbose.for_x_in_123_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_calls_showall_verbose_TestDurations.test_calls_showall_verbose.for_x_in_123_.for_y_in_call_s.for_line_in_result_stdout.else_.raise_AssertionError_not", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 941, "end_line": 951, "span_ids": ["TestDurations.test_calls_showall_verbose"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDurations(object):\n\n def test_calls_showall_verbose(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=0\", \"-vv\")\n assert result.ret == 0\n for x in \"123\":\n for y in (\"call\",): # 'setup', 'call', 'teardown':\n for line in result.stdout.lines:\n if (\"test_%s\" % x) in line and y in line:\n break\n else:\n raise AssertionError(\"not found {} {}\".format(x, y))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_with_deselected_TestDurations.test_with_not.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurations.test_with_deselected_TestDurations.test_with_not.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 953, "end_line": 972, "span_ids": ["TestDurations.test_with_deselected", "TestDurations.test_with_not", "TestDurations.test_with_failing_collection"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "class TestDurations(object):\n\n def test_with_deselected(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=2\", \"-k test_2\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*durations*\", \"*call*test_2*\"])\n\n def test_with_failing_collection(self, testdir):\n testdir.makepyfile(self.source)\n testdir.makepyfile(test_collecterror=\"\"\"xyz\"\"\")\n result = testdir.runpytest(\"--durations=2\", \"-k test_1\")\n assert result.ret == 2\n result.stdout.fnmatch_lines([\"*Interrupted: 1 errors during collection*\"])\n # Collection errors abort test execution, therefore no duration is\n # output\n assert \"duration\" not in result.stdout.str()\n\n def test_with_not(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"-k not 1\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurationWithFixture_TestDurationWithFixture.test_setup_function.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_TestDurationWithFixture_TestDurationWithFixture.test_setup_function.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 975, "end_line": 1000, "span_ids": ["TestDurationWithFixture.test_setup_function", "TestDurationWithFixture"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDurationWithFixture(object):\n source = \"\"\"\n import pytest\n import time\n frag = 0.01\n\n @pytest.fixture\n def setup_fixt():\n time.sleep(frag)\n\n def test_1(setup_fixt):\n time.sleep(frag)\n \"\"\"\n\n def test_setup_function(self, testdir):\n testdir.makepyfile(self.source)\n result = testdir.runpytest(\"--durations=10\")\n assert result.ret == 0\n\n result.stdout.fnmatch_lines_random(\n \"\"\"\n *durations*\n * setup *test_1*\n * call *test_1*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_zipimport_hook_test_zipimport_hook.assert_INTERNALERROR_n": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_zipimport_hook_test_zipimport_hook.assert_INTERNALERROR_n", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1003, "end_line": 1021, "span_ids": ["test_zipimport_hook"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_zipimport_hook(testdir, tmpdir):\n \"\"\"Test package loader is being used correctly (see #1837).\"\"\"\n zipapp = pytest.importorskip(\"zipapp\")\n testdir.tmpdir.join(\"app\").ensure(dir=1)\n testdir.makepyfile(\n **{\n \"app/foo.py\": \"\"\"\n import pytest\n def main():\n pytest.main(['--pyarg', 'foo'])\n \"\"\"\n }\n )\n target = tmpdir.join(\"foo.zip\")\n zipapp.create_archive(str(testdir.tmpdir.join(\"app\")), str(target), main=\"foo:main\")\n result = testdir.runpython(target)\n assert result.ret == 0\n result.stderr.fnmatch_lines([\"*not found*foo*\"])\n assert \"INTERNALERROR>\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_import_plugin_unicode_name_test_pytest_plugins_as_module.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_import_plugin_unicode_name_test_pytest_plugins_as_module.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1024, "end_line": 1051, "span_ids": ["test_pytest_plugins_as_module", "test_import_plugin_unicode_name"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_import_plugin_unicode_name(testdir):\n testdir.makepyfile(myplugin=\"\")\n testdir.makepyfile(\n \"\"\"\n def test(): pass\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n pytest_plugins = [u'myplugin']\n \"\"\"\n )\n r = testdir.runpytest()\n assert r.ret == 0\n\n\ndef test_pytest_plugins_as_module(testdir):\n \"\"\"Do not raise an error if pytest_plugins attribute is a module (#3899)\"\"\"\n testdir.makepyfile(\n **{\n \"__init__.py\": \"\",\n \"pytest_plugins.py\": \"\",\n \"conftest.py\": \"from . 
import pytest_plugins\",\n \"test_foo.py\": \"def test(): pass\",\n }\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_deferred_hook_checking_test_deferred_hook_checking.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_deferred_hook_checking_test_deferred_hook_checking.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1054, "end_line": 1081, "span_ids": ["test_deferred_hook_checking"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deferred_hook_checking(testdir):\n \"\"\"\n Check hooks as late as possible (#1821).\n \"\"\"\n testdir.syspathinsert()\n testdir.makepyfile(\n **{\n \"plugin.py\": \"\"\"\n class Hooks(object):\n def pytest_my_hook(self, config):\n pass\n\n def pytest_configure(config):\n config.pluginmanager.add_hookspecs(Hooks)\n \"\"\",\n \"conftest.py\": \"\"\"\n pytest_plugins = ['plugin']\n def pytest_my_hook(config):\n return 40\n \"\"\",\n \"test_foo.py\": \"\"\"\n def test(request):\n assert request.config.hook.pytest_my_hook(config=request.config) == [40]\n \"\"\",\n }\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_values_leak_test_fixture_values_leak.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_values_leak_test_fixture_values_leak.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1084, "end_line": 1126, "span_ids": ["test_fixture_values_leak"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_values_leak(testdir):\n \"\"\"Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected\n life-times (#2981).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import attr\n import gc\n import pytest\n import weakref\n\n @attr.s\n class SomeObj(object):\n name = attr.ib()\n\n fix_of_test1_ref = None\n session_ref = None\n\n @pytest.fixture(scope='session')\n def session_fix():\n global session_ref\n obj = SomeObj(name='session-fixture')\n session_ref = weakref.ref(obj)\n return obj\n\n 
@pytest.fixture\n def fix(session_fix):\n global fix_of_test1_ref\n obj = SomeObj(name='local-fixture')\n fix_of_test1_ref = weakref.ref(obj)\n return obj\n\n def test1(fix):\n assert fix_of_test1_ref() is fix\n\n def test2():\n gc.collect()\n # fixture \"fix\" created during test1 must have been destroyed by now\n assert fix_of_test1_ref() is None\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_order_respects_scope_test_fixture_order_respects_scope.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_order_respects_scope_test_fixture_order_respects_scope.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1129, "end_line": 1152, "span_ids": ["test_fixture_order_respects_scope"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_order_respects_scope(testdir):\n \"\"\"Ensure that fixtures are created according to scope order, regression test for #2405\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n data = {}\n\n @pytest.fixture(scope='module')\n def clean_data():\n data.clear()\n\n @pytest.fixture(autouse=True)\n def add_data():\n data.update(value=True)\n\n @pytest.mark.usefixtures('clean_data')\n def test_value():\n assert data.get('value')\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_frame_leak_on_failing_test_test_frame_leak_on_failing_test.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_frame_leak_on_failing_test_test_frame_leak_on_failing_test.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1155, "end_line": 1184, "span_ids": ["test_frame_leak_on_failing_test"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frame_leak_on_failing_test(testdir):\n \"\"\"pytest would leak garbage referencing the frames of tests that failed that could never be reclaimed (#2798)\n\n Unfortunately it was not possible to remove the actual circles because most of them\n are made of traceback objects which cannot be weakly referenced. 
Those objects at least\n can be eventually claimed by the garbage collector.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import gc\n import weakref\n\n class Obj:\n pass\n\n ref = None\n\n def test1():\n obj = Obj()\n global ref\n ref = weakref.ref(obj)\n assert 0\n\n def test2():\n gc.collect()\n assert ref() is None\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*1 failed, 1 passed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_mock_integration_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/acceptance_test.py_test_fixture_mock_integration_", "embedding": null, "metadata": {"file_path": "testing/acceptance_test.py", "file_name": "acceptance_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1187, "end_line": 1225, "span_ids": ["test_warn_on_async_function", "test_usage_error_code", "test_fixture_mock_integration"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_mock_integration(testdir):\n \"\"\"Test that decorators applied to fixture are left working (#3774)\"\"\"\n p = testdir.copy_example(\"acceptance/fixture_mock_integration.py\")\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n\ndef test_usage_error_code(testdir):\n result = testdir.runpytest(\"-unknown-option-\")\n assert result.ret == EXIT_USAGEERROR\n\n\n@pytest.mark.skipif(\n sys.version_info[:2] < (3, 5), reason=\"async def syntax python 3.5+ only\"\n)\n@pytest.mark.filterwarnings(\"default\")\ndef test_warn_on_async_function(testdir):\n testdir.makepyfile(\n test_async=\"\"\"\n async def test_1():\n pass\n async def test_2():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"test_async.py::test_1\",\n \"test_async.py::test_2\",\n \"*Coroutine functions are not natively supported*\",\n \"*2 skipped, 2 warnings in*\",\n ]\n )\n # ensure our warning message appears only once\n assert (\n result.stdout.str().count(\"Coroutine functions are not natively supported\") == 1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py__coding_utf_8_test_unicode_handling_syntax_error.if_sys_version_info_0_.text_type_excinfo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py__coding_utf_8_test_unicode_handling_syntax_error.if_sys_version_info_0_.text_type_excinfo_", "embedding": null, "metadata": {"file_path": "testing/code/test_code.py", "file_name": "test_code.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 109, "span_ids": ["x", "test_getstatement_empty_fullsource", "test_code_gives_back_name_for_not_existing_file", "imports:10", "test_code_with_class", "test_code_from_func", "impl", "test_frame_getsourcelineno_myself", "test_code_source", "imports:9", "docstring", 
"test_unicode_handling", "imports", "test_code_with_class.A", "test_code_with_class.A:2", "test_ne", "test_unicode_handling_syntax_error", "impl:2", "test_code_fullsource"], "tokens": 641}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# coding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nfrom six import text_type\nfrom test_excinfo import TWMock\n\nimport _pytest._code\nimport pytest\n\ntry:\n import mock\nexcept ImportError:\n import unittest.mock as mock\n\n\ndef test_ne():\n code1 = _pytest._code.Code(compile('foo = \"bar\"', \"\", \"exec\"))\n assert code1 == code1\n code2 = _pytest._code.Code(compile('foo = \"baz\"', \"\", \"exec\"))\n assert code2 != code1\n\n\ndef test_code_gives_back_name_for_not_existing_file():\n name = \"abc-123\"\n co_code = compile(\"pass\\n\", name, \"exec\")\n assert co_code.co_filename == name\n code = _pytest._code.Code(co_code)\n assert str(code.path) == name\n assert code.fullsource is None\n\n\ndef test_code_with_class():\n class A(object):\n pass\n\n pytest.raises(TypeError, _pytest._code.Code, A)\n\n\ndef x():\n raise NotImplementedError()\n\n\ndef test_code_fullsource():\n code = _pytest._code.Code(x)\n full = code.fullsource\n assert \"test_code_fullsource()\" in str(full)\n\n\ndef test_code_source():\n code = _pytest._code.Code(x)\n src = code.source()\n expected = \"\"\"def x():\n raise NotImplementedError()\"\"\"\n assert str(src) == expected\n\n\ndef test_frame_getsourcelineno_myself():\n def func():\n return sys._getframe(0)\n\n f = func()\n f = _pytest._code.Frame(f)\n source, lineno = f.code.fullsource, f.lineno\n assert source[lineno].startswith(\" return sys._getframe(0)\")\n\n\ndef test_getstatement_empty_fullsource():\n def func():\n return sys._getframe(0)\n\n f = func()\n f = _pytest._code.Frame(f)\n with mock.patch.object(f.code.__class__, \"fullsource\", None):\n assert f.statement == \"\"\n\n\ndef test_code_from_func():\n co = _pytest._code.Code(test_frame_getsourcelineno_myself)\n assert co.firstlineno\n assert co.path\n\n\ndef test_unicode_handling():\n value = u\"\u0105\u0107\".encode(\"UTF-8\")\n\n def f():\n raise Exception(value)\n\n excinfo = pytest.raises(Exception, f)\n text_type(excinfo)\n if sys.version_info < (3,):\n bytes(excinfo)\n\n\n@pytest.mark.skipif(sys.version_info[0] >= 3, reason=\"python 2 only issue\")\ndef test_unicode_handling_syntax_error():\n value = u\"\u0105\u0107\".encode(\"UTF-8\")\n\n def f():\n raise SyntaxError(\"invalid syntax\", (None, 1, 3, value))\n\n excinfo = pytest.raises(Exception, f)\n str(excinfo)\n if sys.version_info[0] < 3:\n text_type(excinfo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_code_getargs_test_code_getargs.assert_c4_getargs_var_Tru": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_code_getargs_test_code_getargs.assert_c4_getargs_var_Tru", "embedding": null, "metadata": {"file_path": "testing/code/test_code.py", "file_name": "test_code.py", "file_type": 
"text/x-python", "category": "test", "start_line": 112, "end_line": 135, "span_ids": ["test_code_getargs"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_code_getargs():\n def f1(x):\n raise NotImplementedError()\n\n c1 = _pytest._code.Code(f1)\n assert c1.getargs(var=True) == (\"x\",)\n\n def f2(x, *y):\n raise NotImplementedError()\n\n c2 = _pytest._code.Code(f2)\n assert c2.getargs(var=True) == (\"x\", \"y\")\n\n def f3(x, **z):\n raise NotImplementedError()\n\n c3 = _pytest._code.Code(f3)\n assert c3.getargs(var=True) == (\"x\", \"z\")\n\n def f4(x, *y, **z):\n raise NotImplementedError()\n\n c4 = _pytest._code.Code(f4)\n assert c4.getargs(var=True) == (\"x\", \"y\", \"z\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_frame_getargs_test_frame_getargs.assert_fr4_getargs_var_Tr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_test_frame_getargs_test_frame_getargs.assert_fr4_getargs_var_Tr", "embedding": null, "metadata": {"file_path": "testing/code/test_code.py", "file_name": "test_code.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 161, "span_ids": ["test_frame_getargs"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_frame_getargs():\n def f1(x):\n return sys._getframe(0)\n\n fr1 = _pytest._code.Frame(f1(\"a\"))\n assert fr1.getargs(var=True) == [(\"x\", \"a\")]\n\n def f2(x, *y):\n return sys._getframe(0)\n\n fr2 = _pytest._code.Frame(f2(\"a\", \"b\", \"c\"))\n assert fr2.getargs(var=True) == [(\"x\", \"a\"), (\"y\", (\"b\", \"c\"))]\n\n def f3(x, **z):\n return sys._getframe(0)\n\n fr3 = _pytest._code.Frame(f3(\"a\", b=\"c\"))\n assert fr3.getargs(var=True) == [(\"x\", \"a\"), (\"z\", {\"b\": \"c\"})]\n\n def f4(x, *y, **z):\n return sys._getframe(0)\n\n fr4 = _pytest._code.Frame(f4(\"a\", \"b\", c=\"d\"))\n assert fr4.getargs(var=True) == [(\"x\", \"a\"), (\"y\", (\"b\",)), (\"z\", {\"c\": \"d\"})]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestExceptionInfo_TestTracebackEntry.test_getsource.assert_assert_False_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestExceptionInfo_TestTracebackEntry.test_getsource.assert_assert_False_in_", "embedding": null, "metadata": {"file_path": "testing/code/test_code.py", "file_name": "test_code.py", "file_type": "text/x-python", "category": "test", "start_line": 164, "end_line": 192, "span_ids": ["TestExceptionInfo.test_from_current_with_missing", "TestExceptionInfo", "TestTracebackEntry.test_getsource", 
"TestExceptionInfo.test_bad_getsource", "TestTracebackEntry"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestExceptionInfo(object):\n def test_bad_getsource(self):\n try:\n if False:\n pass\n else:\n assert False\n except AssertionError:\n exci = _pytest._code.ExceptionInfo.from_current()\n assert exci.getrepr()\n\n def test_from_current_with_missing(self):\n with pytest.raises(AssertionError, match=\"no current exception\"):\n _pytest._code.ExceptionInfo.from_current()\n\n\nclass TestTracebackEntry(object):\n def test_getsource(self):\n try:\n if False:\n pass\n else:\n assert False\n except AssertionError:\n exci = _pytest._code.ExceptionInfo.from_current()\n entry = exci.traceback[0]\n source = entry.getsource()\n assert len(source) == 6\n assert \"assert False\" in source[5]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestReprFuncArgs_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_code.py_TestReprFuncArgs_", "embedding": null, "metadata": {"file_path": "testing/code/test_code.py", "file_name": "test_code.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 212, "span_ids": ["TestReprFuncArgs", "TestReprFuncArgs.test_not_raise_exception_with_mixed_encoding"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReprFuncArgs(object):\n def test_not_raise_exception_with_mixed_encoding(self):\n from _pytest._code.code import ReprFuncArgs\n\n tw = TWMock()\n\n args = [(\"unicode_string\", u\"S\u00e3o Paulo\"), (\"utf8_string\", b\"S\\xc3\\xa3o Paulo\")]\n\n r = ReprFuncArgs(args)\n r.toterminal(tw)\n if sys.version_info[0] >= 3:\n assert (\n tw.lines[0]\n == r\"unicode_string = S\u00e3o Paulo, utf8_string = b'S\\xc3\\xa3o Paulo'\"\n )\n else:\n assert tw.lines[0] == \"unicode_string = S\u00e3o Paulo, utf8_string = S\u00e3o Paulo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__coding_utf_8__limited_recursion_depth.sys_setrecursionlimit_bef": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__coding_utf_8__limited_recursion_depth.sys_setrecursionlimit_bef", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 40, "span_ids": ["impl", "imports:18", "limited_recursion_depth", "docstring", "impl:2", "imports"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport operator\nimport os\nimport sys\nimport textwrap\n\nimport py\nimport six\nfrom six.moves import queue\nfrom test_source import astonly\n\nimport _pytest\nimport pytest\nfrom _pytest._code.code import ExceptionChainRepr\nfrom _pytest._code.code import ExceptionInfo\nfrom _pytest._code.code import FormattedExcinfo\nfrom _pytest._code.code import ReprExceptionInfo\n\ntry:\n import importlib\nexcept ImportError:\n invalidate_import_caches = None\nelse:\n invalidate_import_caches = getattr(importlib, \"invalidate_caches\", None)\n\nfailsonjython = pytest.mark.xfail(\"sys.platform.startswith('java')\")\n\npytest_version_info = tuple(map(int, pytest.__version__.split(\".\")[:3]))\n\n\n@pytest.fixture\ndef limited_recursion_depth():\n before = sys.getrecursionlimit()\n sys.setrecursionlimit(150)\n yield\n sys.setrecursionlimit(before)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TWMock_test_excinfo_simple.assert_info_type_Value": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TWMock_test_excinfo_simple.assert_info_type_Value", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 74, "span_ids": ["TWMock.markup", "TWMock:4", "TWMock.sep", "test_excinfo_simple", "TWMock", "TWMock.line", "TWMock.get_write_msg", "TWMock.write"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TWMock(object):\n WRITE = object()\n\n def __init__(self):\n self.lines = []\n self.is_writing = False\n\n def sep(self, sep, line=None):\n self.lines.append((sep, line))\n\n def write(self, msg, **kw):\n self.lines.append((TWMock.WRITE, msg))\n\n def line(self, line, **kw):\n self.lines.append(line)\n\n def markup(self, text, **kw):\n return text\n\n def get_write_msg(self, idx):\n flag, msg = self.lines[idx]\n assert flag == TWMock.WRITE\n return msg\n\n fullwidth = 80\n\n\ndef test_excinfo_simple():\n try:\n raise ValueError\n except ValueError:\n info = _pytest._code.ExceptionInfo.from_current()\n assert info.type == ValueError", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_getstatement_test_excinfo_getstatement._xxx": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_getstatement_test_excinfo_getstatement._xxx", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", 
"file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 98, "span_ids": ["test_excinfo_getstatement"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_excinfo_getstatement():\n def g():\n raise ValueError\n\n def f():\n g()\n\n try:\n f()\n except ValueError:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n linenumbers = [\n _pytest._code.getrawcode(f).co_firstlineno - 1 + 4,\n _pytest._code.getrawcode(f).co_firstlineno - 1 + 1,\n _pytest._code.getrawcode(g).co_firstlineno - 1 + 1,\n ]\n values = list(excinfo.traceback)\n foundlinenumbers = [x.lineno for x in values]\n assert foundlinenumbers == linenumbers\n # for x in info:\n # print \"%s:%d %s\" %(x.path.relto(root), x.lineno, x.statement)\n # xxx", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__testchain_for_getentrie_TestTraceback_f_g_h.test_traceback_entry_getsource.assert_s_endswith_raise_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py__testchain_for_getentrie_TestTraceback_f_g_h.test_traceback_entry_getsource.assert_s_endswith_raise_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 101, "end_line": 147, "span_ids": ["TestTraceback_f_g_h.test_traceback_entry_getsource", "f", "TestTraceback_f_g_h", "TestTraceback_f_g_h.test_traceback_entries", "test_excinfo_getstatement", "h", "g", "TestTraceback_f_g_h.setup_method"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# testchain for getentries test below\n\n\ndef f():\n #\n raise ValueError\n #\n\n\ndef g():\n #\n __tracebackhide__ = True\n f()\n #\n\n\ndef h():\n #\n g()\n #\n\n\nclass TestTraceback_f_g_h(object):\n def setup_method(self, method):\n try:\n h()\n except ValueError:\n self.excinfo = _pytest._code.ExceptionInfo.from_current()\n\n def test_traceback_entries(self):\n tb = self.excinfo.traceback\n entries = list(tb)\n assert len(tb) == 4 # maybe fragile test\n assert len(entries) == 4 # maybe fragile test\n names = [\"f\", \"g\", \"h\"]\n for entry in entries:\n try:\n names.remove(entry.frame.code.name)\n except ValueError:\n pass\n assert not names\n\n def test_traceback_entry_getsource(self):\n tb = self.excinfo.traceback\n s = str(tb[-1].getsource())\n assert s.startswith(\"def f():\")\n assert s.endswith(\"raise ValueError\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct.try_.except_NameError_.assert_s_strip_endswith": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct_TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct.try_.except_NameError_.assert_s_strip_endswith", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 150, "end_line": 170, "span_ids": ["TestTraceback_f_g_h.test_traceback_entry_getsource_in_construct"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceback_f_g_h(object):\n\n @astonly\n @failsonjython\n def test_traceback_entry_getsource_in_construct(self):\n source = _pytest._code.Source(\n \"\"\"\\\n def xyz():\n try:\n raise ValueError\n except somenoname:\n pass\n xyz()\n \"\"\"\n )\n try:\n exec(source.compile())\n except NameError:\n tb = _pytest._code.ExceptionInfo.from_current().traceback\n print(tb[-1].getsource())\n s = str(tb[-1].getsource())\n assert s.startswith(\"def xyz():\\n try:\")\n assert s.strip().endswith(\"except somenoname:\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_cut_TestTraceback_f_g_h.test_traceback_filter.assert_len_ntraceback_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_cut_TestTraceback_f_g_h.test_traceback_filter.assert_len_ntraceback_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 193, "span_ids": ["TestTraceback_f_g_h.test_traceback_cut_excludepath", "TestTraceback_f_g_h.test_traceback_filter", "TestTraceback_f_g_h.test_traceback_cut"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceback_f_g_h(object):\n\n def test_traceback_cut(self):\n co = _pytest._code.Code(f)\n path, firstlineno = co.path, co.firstlineno\n traceback = self.excinfo.traceback\n newtraceback = traceback.cut(path=path, firstlineno=firstlineno)\n assert len(newtraceback) == 1\n newtraceback = traceback.cut(path=path, lineno=firstlineno + 2)\n assert len(newtraceback) == 1\n\n def test_traceback_cut_excludepath(self, testdir):\n p = testdir.makepyfile(\"def f(): raise ValueError\")\n with pytest.raises(ValueError) as excinfo:\n p.pyimport().f()\n basedir = py.path.local(pytest.__file__).dirpath()\n 
newtraceback = excinfo.traceback.cut(excludepath=basedir)\n for x in newtraceback:\n if hasattr(x, \"path\"):\n assert not py.path.local(x.path).relto(basedir)\n assert newtraceback[-1].frame.code.path == p\n\n def test_traceback_filter(self):\n traceback = self.excinfo.traceback\n ntraceback = traceback.filter()\n assert len(ntraceback) == len(traceback) - 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_filter_selective_TestTraceback_f_g_h.test_traceback_filter_selective.if_matching_.else_.assert_len_ntraceback_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_filter_selective_TestTraceback_f_g_h.test_traceback_filter_selective.if_matching_.else_.assert_len_ntraceback_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 195, "end_line": 231, "span_ids": ["TestTraceback_f_g_h.test_traceback_filter_selective"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceback_f_g_h(object):\n\n @pytest.mark.parametrize(\n \"tracebackhide, matching\",\n [\n (lambda info: True, True),\n (lambda info: False, False),\n (operator.methodcaller(\"errisinstance\", ValueError), True),\n (operator.methodcaller(\"errisinstance\", IndexError), False),\n ],\n )\n def test_traceback_filter_selective(self, tracebackhide, matching):\n def f():\n #\n raise ValueError\n #\n\n def g():\n #\n __tracebackhide__ = tracebackhide\n f()\n #\n\n def h():\n #\n g()\n #\n\n excinfo = pytest.raises(ValueError, h)\n traceback = excinfo.traceback\n ntraceback = traceback.filter()\n print(\"old: {!r}\".format(traceback))\n print(\"new: {!r}\".format(ntraceback))\n\n if matching:\n assert len(ntraceback) == len(traceback) - 2\n else:\n # -1 because of the __tracebackhide__ in pytest.raises\n assert len(ntraceback) == len(traceback) - 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_recursion_index_TestTraceback_f_g_h.test_traceback_messy_recursion.assert_excinfo_traceback_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_recursion_index_TestTraceback_f_g_h.test_traceback_messy_recursion.assert_excinfo_traceback_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 233, "end_line": 292, "span_ids": ["TestTraceback_f_g_h.test_traceback_recursion_index", "TestTraceback_f_g_h.test_traceback_messy_recursion", "TestTraceback_f_g_h.test_traceback_no_recursion_index", "TestTraceback_f_g_h.test_traceback_only_specific_recursion_errors"], "tokens": 401}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceback_f_g_h(object):\n\n def test_traceback_recursion_index(self):\n def f(n):\n if n < 10:\n n += 1\n f(n)\n\n excinfo = pytest.raises(RuntimeError, f, 8)\n traceback = excinfo.traceback\n recindex = traceback.recursionindex()\n assert recindex == 3\n\n def test_traceback_only_specific_recursion_errors(self, monkeypatch):\n def f(n):\n if n == 0:\n raise RuntimeError(\"hello\")\n f(n - 1)\n\n excinfo = pytest.raises(RuntimeError, f, 25)\n monkeypatch.delattr(excinfo.traceback.__class__, \"recursionindex\")\n repr = excinfo.getrepr()\n assert \"RuntimeError: hello\" in str(repr.reprcrash)\n\n def test_traceback_no_recursion_index(self):\n def do_stuff():\n raise RuntimeError\n\n def reraise_me():\n import sys\n\n exc, val, tb = sys.exc_info()\n six.reraise(exc, val, tb)\n\n def f(n):\n try:\n do_stuff()\n except: # noqa\n reraise_me()\n\n excinfo = pytest.raises(RuntimeError, f, 8)\n traceback = excinfo.traceback\n recindex = traceback.recursionindex()\n assert recindex is None\n\n def test_traceback_messy_recursion(self):\n # XXX: simplified locally testable version\n decorator = pytest.importorskip(\"decorator\").decorator\n\n def log(f, *k, **kw):\n print(\"{} {}\".format(k, kw))\n f(*k, **kw)\n\n log = decorator(log)\n\n def fail():\n raise ValueError(\"\")\n\n fail = log(log(fail))\n\n excinfo = pytest.raises(ValueError, fail)\n assert excinfo.traceback.recursionindex() is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_getcrashentry_TestTraceback_f_g_h.test_traceback_getcrashentry_empty.assert_entry_frame_code_n": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestTraceback_f_g_h.test_traceback_getcrashentry_TestTraceback_f_g_h.test_traceback_getcrashentry_empty.assert_entry_frame_code_n", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 332, "span_ids": ["TestTraceback_f_g_h.test_traceback_getcrashentry", "TestTraceback_f_g_h.test_traceback_getcrashentry_empty"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceback_f_g_h(object):\n\n def test_traceback_getcrashentry(self):\n def i():\n __tracebackhide__ = True\n raise ValueError\n\n def h():\n i()\n\n def g():\n __tracebackhide__ = True\n h()\n\n def f():\n g()\n\n excinfo = pytest.raises(ValueError, f)\n tb = excinfo.traceback\n entry = tb.getcrashentry()\n co = _pytest._code.Code(h)\n assert entry.frame.code.path == co.path\n assert entry.lineno == co.firstlineno + 1\n assert entry.frame.code.name == \"h\"\n\n def test_traceback_getcrashentry_empty(self):\n def 
g():\n __tracebackhide__ = True\n raise ValueError\n\n def f():\n __tracebackhide__ = True\n g()\n\n excinfo = pytest.raises(ValueError, f)\n tb = excinfo.traceback\n entry = tb.getcrashentry()\n co = _pytest._code.Code(g)\n assert entry.frame.code.path == co.path\n assert entry.lineno == co.firstlineno + 2\n assert entry.frame.code.name == \"g\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_exconly_test_excinfo_no_sourcecode.assert_s_File_str": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_exconly_test_excinfo_no_sourcecode.assert_s_File_str", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 335, "end_line": 376, "span_ids": ["test_excinfo_for_later", "test_excinfo_no_sourcecode", "test_excinfo_exconly", "test_excinfo_str", "test_excinfo_errisinstance", "test_excinfo_repr"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_excinfo_exconly():\n excinfo = pytest.raises(ValueError, h)\n assert excinfo.exconly().startswith(\"ValueError\")\n with pytest.raises(ValueError) as excinfo:\n raise ValueError(\"hello\\nworld\")\n msg = excinfo.exconly(tryshort=True)\n assert msg.startswith(\"ValueError\")\n assert msg.endswith(\"world\")\n\n\ndef test_excinfo_repr():\n excinfo = pytest.raises(ValueError, h)\n s = repr(excinfo)\n assert s == \"\"\n\n\ndef test_excinfo_str():\n excinfo = pytest.raises(ValueError, h)\n s = str(excinfo)\n assert s.startswith(__file__[:-9]) # pyc file and $py.class\n assert s.endswith(\"ValueError\")\n assert len(s.split(\":\")) >= 3 # on windows it's 4\n\n\ndef test_excinfo_for_later():\n e = ExceptionInfo.for_later()\n assert \"for raises\" in repr(e)\n assert \"for raises\" in str(e)\n\n\ndef test_excinfo_errisinstance():\n excinfo = pytest.raises(ValueError, h)\n assert excinfo.errisinstance(ValueError)\n\n\ndef test_excinfo_no_sourcecode():\n try:\n exec(\"raise ValueError()\")\n except ValueError:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n s = str(excinfo.traceback[-1])\n assert s == \" File '':1 in \\n ???\\n\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_no_python_sourcecode_test_excinfo_no_python_sourcecode.for_item_in_excinfo_trace.if_item_path_basename_.assert_str_item_source_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_excinfo_no_python_sourcecode_test_excinfo_no_python_sourcecode.for_item_in_excinfo_trace.if_item_path_basename_.assert_str_item_source_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 379, "end_line": 392, "span_ids": 
["test_excinfo_no_python_sourcecode"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_excinfo_no_python_sourcecode(tmpdir):\n # XXX: simplified locally testable version\n tmpdir.join(\"test.txt\").write(\"{{ h()}}:\")\n\n jinja2 = pytest.importorskip(\"jinja2\")\n loader = jinja2.FileSystemLoader(str(tmpdir))\n env = jinja2.Environment(loader=loader)\n template = env.get_template(\"test.txt\")\n excinfo = pytest.raises(ValueError, template.render, h=h)\n for item in excinfo.traceback:\n print(item) # XXX: for some reason jinja.Template.render is printed in full\n item.source # shouldnt fail\n if item.path.basename == \"test.txt\":\n assert str(item.source) == \"{{ h()}}:\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_entrysource_Queue_example_test_match_raises_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_entrysource_Queue_example_test_match_raises_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 395, "end_line": 437, "span_ids": ["test_match_raises_error", "test_entrysource_Queue_example", "test_codepath_Queue_example", "test_match_succeeds"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_entrysource_Queue_example():\n try:\n queue.Queue().get(timeout=0.001)\n except queue.Empty:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n entry = excinfo.traceback[-1]\n source = entry.getsource()\n assert source is not None\n s = str(source).strip()\n assert s.startswith(\"def get\")\n\n\ndef test_codepath_Queue_example():\n try:\n queue.Queue().get(timeout=0.001)\n except queue.Empty:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n entry = excinfo.traceback[-1]\n path = entry.path\n assert isinstance(path, py.path.local)\n assert path.basename.lower() == \"queue.py\"\n assert path.check()\n\n\ndef test_match_succeeds():\n with pytest.raises(ZeroDivisionError) as excinfo:\n 0 // 0\n excinfo.match(r\".*zero.*\")\n\n\ndef test_match_raises_error(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_division_zero():\n with pytest.raises(ZeroDivisionError) as excinfo:\n 0 / 0\n excinfo.match(r'[123]+')\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n result.stdout.fnmatch_lines([\"*AssertionError*Pattern*[123]*not found*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo_TestFormattedExcinfo.test_repr_source.assert_lines_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo_TestFormattedExcinfo.test_repr_source.assert_lines_1_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 477, "span_ids": ["TestFormattedExcinfo.importasmod", "TestFormattedExcinfo.excinfo_from_exec", "TestFormattedExcinfo", "TestFormattedExcinfo.test_repr_source"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n @pytest.fixture\n def importasmod(self, request, _sys_snapshot):\n def importasmod(source):\n source = textwrap.dedent(source)\n tmpdir = request.getfixturevalue(\"tmpdir\")\n modpath = tmpdir.join(\"mod.py\")\n tmpdir.ensure(\"__init__.py\")\n modpath.write(source)\n if invalidate_import_caches is not None:\n invalidate_import_caches()\n return modpath.pyimport()\n\n return importasmod\n\n def excinfo_from_exec(self, source):\n source = _pytest._code.Source(source).strip()\n try:\n exec(source.compile())\n except KeyboardInterrupt:\n raise\n except: # noqa\n return _pytest._code.ExceptionInfo.from_current()\n assert 0, \"did not raise\"\n\n def test_repr_source(self):\n pr = FormattedExcinfo()\n source = _pytest._code.Source(\n \"\"\"\\\n def f(x):\n pass\n \"\"\"\n ).strip()\n pr.flow_marker = \"|\"\n lines = pr.get_source(source, 0)\n assert len(lines) == 2\n assert lines[0] == \"| def f(x):\"\n assert lines[1] == \" pass\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_excinfo_TestFormattedExcinfo.test_repr_source_excinfo.assert_lines_def": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_excinfo_TestFormattedExcinfo.test_repr_source_excinfo.assert_lines_def", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 492, "span_ids": ["TestFormattedExcinfo.test_repr_source_excinfo"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_source_excinfo(self):\n \"\"\" check if indentation is right \"\"\"\n pr = FormattedExcinfo()\n excinfo = self.excinfo_from_exec(\n \"\"\"\n def f():\n assert 0\n f()\n \"\"\"\n )\n pr = FormattedExcinfo()\n source = pr._getentrysource(excinfo.traceback[-1])\n lines = pr.get_source(source, 1, excinfo)\n assert lines == [\" def 
f():\", \"> assert 0\", \"E AssertionError\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_not_existing_TestFormattedExcinfo.test_repr_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_not_existing_TestFormattedExcinfo.test_repr_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 494, "end_line": 504, "span_ids": ["TestFormattedExcinfo.test_repr_source_not_existing"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_source_not_existing(self):\n pr = FormattedExcinfo()\n co = compile(\"raise ValueError()\", \"\", \"exec\")\n try:\n exec(co)\n except ValueError:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n repr = pr.repr_excinfo(excinfo)\n assert repr.reprtraceback.reprentries[1].lines[0] == \"> ???\"\n if sys.version_info[0] >= 3:\n assert repr.chain[0][0].reprentries[1].lines[0] == \"> ???\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_many_line_source_not_existing_TestFormattedExcinfo.test_repr_many_line_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_many_line_source_not_existing_TestFormattedExcinfo.test_repr_many_line_source_not_existing.if_sys_version_info_0_.assert_repr_chain_0_0_r", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 523, "span_ids": ["TestFormattedExcinfo.test_repr_many_line_source_not_existing"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_many_line_source_not_existing(self):\n pr = FormattedExcinfo()\n co = compile(\n \"\"\"\na = 1\nraise ValueError()\n\"\"\",\n \"\",\n \"exec\",\n )\n try:\n exec(co)\n except ValueError:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n repr = pr.repr_excinfo(excinfo)\n assert repr.reprtraceback.reprentries[1].lines[0] == \"> ???\"\n if sys.version_info[0] >= 3:\n assert repr.chain[0][0].reprentries[1].lines[0] == \"> ???\"", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_failing_fullsource_TestFormattedExcinfo.test_repr_source_failing_fullsource.None_1.assert_repr_chain_0_0_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_source_failing_fullsource_TestFormattedExcinfo.test_repr_source_failing_fullsource.None_1.assert_repr_chain_0_0_r", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 525, "end_line": 587, "span_ids": ["TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeRawTB", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeExcinfo:2", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeTracebackEntry.__init__", "TestFormattedExcinfo.test_repr_source_failing_fullsource.Traceback:2", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeCode", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeExcinfo", "TestFormattedExcinfo.test_repr_source_failing_fullsource.Traceback", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeFrame:2", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeFrame", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeRawTB:2", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeCode.raw:2", "TestFormattedExcinfo.test_repr_source_failing_fullsource.FakeTracebackEntry", "TestFormattedExcinfo.test_repr_source_failing_fullsource"], "tokens": 421}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_source_failing_fullsource(self):\n pr = FormattedExcinfo()\n\n class FakeCode(object):\n class raw(object):\n co_filename = \"?\"\n\n path = \"?\"\n firstlineno = 5\n\n def fullsource(self):\n return None\n\n fullsource = property(fullsource)\n\n class FakeFrame(object):\n code = FakeCode()\n f_locals = {}\n f_globals = {}\n\n class FakeTracebackEntry(_pytest._code.Traceback.Entry):\n def __init__(self, tb, excinfo=None):\n self.lineno = 5 + 3\n\n @property\n def frame(self):\n return FakeFrame()\n\n class Traceback(_pytest._code.Traceback):\n Entry = FakeTracebackEntry\n\n class FakeExcinfo(_pytest._code.ExceptionInfo):\n typename = \"Foo\"\n value = Exception()\n\n def __init__(self):\n pass\n\n def exconly(self, tryshort):\n return \"EXC\"\n\n def errisinstance(self, cls):\n return False\n\n excinfo = FakeExcinfo()\n\n class FakeRawTB(object):\n tb_next = None\n\n tb = FakeRawTB()\n excinfo.traceback = Traceback(tb)\n\n fail = IOError()\n repr = pr.repr_excinfo(excinfo)\n assert repr.reprtraceback.reprentries[0].lines[0] == \"> ???\"\n if sys.version_info[0] >= 3:\n assert repr.chain[0][0].reprentries[0].lines[0] == \"> ???\"\n\n fail = py.error.ENOENT # noqa\n repr = pr.repr_excinfo(excinfo)\n assert repr.reprtraceback.reprentries[0].lines[0] == \"> ???\"\n if sys.version_info[0] >= 3:\n assert repr.chain[0][0].reprentries[0].lines[0] == 
\"> ???\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_TestFormattedExcinfo.test_repr_local.assert_reprlocals_lines_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_TestFormattedExcinfo.test_repr_local.assert_reprlocals_lines_3", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 589, "end_line": 597, "span_ids": ["TestFormattedExcinfo.test_repr_local"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_local(self):\n p = FormattedExcinfo(showlocals=True)\n loc = {\"y\": 5, \"z\": 7, \"x\": 3, \"@x\": 2, \"__builtins__\": {}}\n reprlocals = p.repr_locals(loc)\n assert reprlocals.lines\n assert reprlocals.lines[0] == \"__builtins__ = \"\n assert reprlocals.lines[1] == \"x = 3\"\n assert reprlocals.lines[2] == \"y = 5\"\n assert reprlocals.lines[3] == \"z = 7\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_error_TestFormattedExcinfo.test_repr_local_with_error.assert_NotImplementedEr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_error_TestFormattedExcinfo.test_repr_local_with_error.assert_NotImplementedEr", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 609, "span_ids": ["TestFormattedExcinfo.test_repr_local_with_error", "TestFormattedExcinfo.test_repr_local_with_error.ObjWithErrorInRepr", "TestFormattedExcinfo.test_repr_local_with_error.ObjWithErrorInRepr.__repr__"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_local_with_error(self):\n class ObjWithErrorInRepr:\n def __repr__(self):\n raise NotImplementedError\n\n p = FormattedExcinfo(showlocals=True, truncate_locals=False)\n loc = {\"x\": ObjWithErrorInRepr(), \"__builtins__\": {}}\n reprlocals = p.repr_locals(loc)\n assert reprlocals.lines\n assert reprlocals.lines[0] == \"__builtins__ = \"\n assert '[NotImplementedError(\"\") raised in repr()]' in reprlocals.lines[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.assert_ExceptionWithBro": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property_TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.assert_ExceptionWithBro", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 611, "end_line": 626, "span_ids": ["TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.ExceptionWithBrokenClass.__class__", "TestFormattedExcinfo.test_repr_local_with_exception_in_class_property", "TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.ObjWithErrorInRepr.__repr__", "TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.ExceptionWithBrokenClass", "TestFormattedExcinfo.test_repr_local_with_exception_in_class_property.ObjWithErrorInRepr"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_local_with_exception_in_class_property(self):\n class ExceptionWithBrokenClass(Exception):\n @property\n def __class__(self):\n raise TypeError(\"boom!\")\n\n class ObjWithErrorInRepr:\n def __repr__(self):\n raise ExceptionWithBrokenClass()\n\n p = FormattedExcinfo(showlocals=True, truncate_locals=False)\n loc = {\"x\": ObjWithErrorInRepr(), \"__builtins__\": {}}\n reprlocals = p.repr_locals(loc)\n assert reprlocals.lines\n assert reprlocals.lines[0] == \"__builtins__ = \"\n assert '[ExceptionWithBrokenClass(\"\") raised in repr()]' in reprlocals.lines[1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_truncated_TestFormattedExcinfo.test_repr_local_truncated.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_local_truncated_TestFormattedExcinfo.test_repr_local_truncated.None_3", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 628, "end_line": 638, "span_ids": ["TestFormattedExcinfo.test_repr_local_truncated"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_local_truncated(self):\n loc = {\"l\": [i for i in range(10)]}\n p = FormattedExcinfo(showlocals=True)\n truncated_reprlocals = p.repr_locals(loc)\n assert truncated_reprlocals.lines\n 
assert truncated_reprlocals.lines[0] == \"l = [0, 1, 2, 3, 4, 5, ...]\"\n\n q = FormattedExcinfo(showlocals=True, truncate_locals=False)\n full_reprlocals = q.repr_locals(loc)\n assert full_reprlocals.lines\n assert full_reprlocals.lines[0] == \"l = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_TestFormattedExcinfo.test_repr_tracebackentry_lines._assert_loc_message_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_TestFormattedExcinfo.test_repr_tracebackentry_lines._assert_loc_message_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 640, "end_line": 671, "span_ids": ["TestFormattedExcinfo.test_repr_tracebackentry_lines"], "tokens": 305}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_tracebackentry_lines(self, importasmod):\n mod = importasmod(\n \"\"\"\n def func1():\n raise ValueError(\"hello\\\\nworld\")\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.func1)\n excinfo.traceback = excinfo.traceback.filter()\n p = FormattedExcinfo()\n reprtb = p.repr_traceback_entry(excinfo.traceback[-1])\n\n # test as intermittent entry\n lines = reprtb.lines\n assert lines[0] == \" def func1():\"\n assert lines[1] == '> raise ValueError(\"hello\\\\nworld\")'\n\n # test as last entry\n p = FormattedExcinfo(showlocals=True)\n repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)\n lines = repr_entry.lines\n assert lines[0] == \" def func1():\"\n assert lines[1] == '> raise ValueError(\"hello\\\\nworld\")'\n assert lines[2] == \"E ValueError: hello\"\n assert lines[3] == \"E world\"\n assert not lines[4:]\n\n loc = repr_entry.reprlocals is not None\n loc = repr_entry.reprfileloc\n assert loc.path == mod.__file__\n assert loc.lineno == 3\n # assert loc.message == \"ValueError: hello\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines2_TestFormattedExcinfo.test_repr_tracebackentry_lines2.assert_tw_lines_2_z_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines2_TestFormattedExcinfo.test_repr_tracebackentry_lines2.assert_tw_lines_2_z_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 673, "end_line": 697, "span_ids": ["TestFormattedExcinfo.test_repr_tracebackentry_lines2"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_tracebackentry_lines2(self, importasmod):\n mod = importasmod(\n \"\"\"\n def func1(m, x, y, z):\n raise ValueError(\"hello\\\\nworld\")\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.func1, \"m\" * 90, 5, 13, \"z\" * 120)\n excinfo.traceback = excinfo.traceback.filter()\n entry = excinfo.traceback[-1]\n p = FormattedExcinfo(funcargs=True)\n reprfuncargs = p.repr_args(entry)\n assert reprfuncargs.args[0] == (\"m\", repr(\"m\" * 90))\n assert reprfuncargs.args[1] == (\"x\", \"5\")\n assert reprfuncargs.args[2] == (\"y\", \"13\")\n assert reprfuncargs.args[3] == (\"z\", repr(\"z\" * 120))\n\n p = FormattedExcinfo(funcargs=True)\n repr_entry = p.repr_traceback_entry(entry)\n assert repr_entry.reprfuncargs.args == reprfuncargs.args\n tw = TWMock()\n repr_entry.toterminal(tw)\n assert tw.lines[0] == \"m = \" + repr(\"m\" * 90)\n assert tw.lines[1] == \"x = 5, y = 13\"\n assert tw.lines[2] == \"z = \" + repr(\"z\" * 120)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args.assert_tw_lines_0_x_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args_TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args.assert_tw_lines_0_x_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 699, "end_line": 720, "span_ids": ["TestFormattedExcinfo.test_repr_tracebackentry_lines_var_kw_args"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):\n mod = importasmod(\n \"\"\"\n def func1(x, *y, **z):\n raise ValueError(\"hello\\\\nworld\")\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.func1, \"a\", \"b\", c=\"d\")\n excinfo.traceback = excinfo.traceback.filter()\n entry = excinfo.traceback[-1]\n p = FormattedExcinfo(funcargs=True)\n reprfuncargs = p.repr_args(entry)\n assert reprfuncargs.args[0] == (\"x\", repr(\"a\"))\n assert reprfuncargs.args[1] == (\"y\", repr((\"b\",)))\n assert reprfuncargs.args[2] == (\"z\", repr({\"c\": \"d\"}))\n\n p = FormattedExcinfo(funcargs=True)\n repr_entry = p.repr_traceback_entry(entry)\n assert repr_entry.reprfuncargs.args == reprfuncargs.args\n tw = TWMock()\n repr_entry.toterminal(tw)\n assert tw.lines[0] == \"x = 'a', y = ('b',), z = {'c': 'd'}\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_short_TestFormattedExcinfo.test_repr_tracebackentry_short.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_short_TestFormattedExcinfo.test_repr_tracebackentry_short.None_6", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 722, "end_line": 747, "span_ids": ["TestFormattedExcinfo.test_repr_tracebackentry_short"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_tracebackentry_short(self, importasmod):\n mod = importasmod(\n \"\"\"\n def func1():\n raise ValueError(\"hello\")\n def entry():\n func1()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n p = FormattedExcinfo(style=\"short\")\n reprtb = p.repr_traceback_entry(excinfo.traceback[-2])\n lines = reprtb.lines\n basename = py.path.local(mod.__file__).basename\n assert lines[0] == \" func1()\"\n assert basename in str(reprtb.reprfileloc.path)\n assert reprtb.reprfileloc.lineno == 5\n\n # test last entry\n p = FormattedExcinfo(style=\"short\")\n reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)\n lines = reprtb.lines\n assert lines[0] == ' raise ValueError(\"hello\")'\n assert lines[1] == \"E ValueError: hello\"\n assert basename in str(reprtb.reprfileloc.path)\n assert reprtb.reprfileloc.lineno == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_no_TestFormattedExcinfo.test_repr_tracebackentry_no.assert_not_lines_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_tracebackentry_no_TestFormattedExcinfo.test_repr_tracebackentry_no.assert_not_lines_1_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 749, "end_line": 766, "span_ids": ["TestFormattedExcinfo.test_repr_tracebackentry_no"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_tracebackentry_no(self, importasmod):\n mod = importasmod(\n \"\"\"\n def func1():\n raise ValueError(\"hello\")\n def entry():\n func1()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n p = FormattedExcinfo(style=\"no\")\n p.repr_traceback_entry(excinfo.traceback[-2])\n\n p = FormattedExcinfo(style=\"no\")\n reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)\n lines = reprentry.lines\n 
assert lines[0] == \"E ValueError: hello\"\n assert not lines[1:]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_tbfilter_TestFormattedExcinfo.test_repr_traceback_tbfilter.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_tbfilter_TestFormattedExcinfo.test_repr_traceback_tbfilter.None_1", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 768, "end_line": 783, "span_ids": ["TestFormattedExcinfo.test_repr_traceback_tbfilter"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_traceback_tbfilter(self, importasmod):\n mod = importasmod(\n \"\"\"\n def f(x):\n raise ValueError(x)\n def entry():\n f(0)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n p = FormattedExcinfo(tbfilter=True)\n reprtb = p.repr_traceback(excinfo)\n assert len(reprtb.reprentries) == 2\n p = FormattedExcinfo(tbfilter=False)\n reprtb = p.repr_traceback(excinfo)\n assert len(reprtb.reprentries) == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_short_no_source_TestFormattedExcinfo.test_traceback_short_no_source.assert_last_lines_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_short_no_source_TestFormattedExcinfo.test_traceback_short_no_source.assert_last_lines_1_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 785, "end_line": 809, "span_ids": ["TestFormattedExcinfo.test_traceback_short_no_source"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_traceback_short_no_source(self, importasmod, monkeypatch):\n mod = importasmod(\n \"\"\"\n def func1():\n raise ValueError(\"hello\")\n def entry():\n func1()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n from _pytest._code.code import Code\n\n monkeypatch.setattr(Code, \"path\", \"bogus\")\n excinfo.traceback[0].frame.code.path = \"bogus\"\n p = FormattedExcinfo(style=\"short\")\n reprtb = p.repr_traceback_entry(excinfo.traceback[-2])\n lines = reprtb.lines\n last_p = FormattedExcinfo(style=\"short\")\n last_reprtb = 
last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)\n last_lines = last_reprtb.lines\n monkeypatch.undo()\n assert lines[0] == \" func1()\"\n\n assert last_lines[0] == ' raise ValueError(\"hello\")'\n assert last_lines[1] == \"E ValueError: hello\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_and_excinfo_TestFormattedExcinfo.test_repr_traceback_and_excinfo.for_style_in_long_sh.assert_repr_reprcrash_mes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_and_excinfo_TestFormattedExcinfo.test_repr_traceback_and_excinfo.for_style_in_long_sh.assert_repr_reprcrash_mes", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 811, "end_line": 835, "span_ids": ["TestFormattedExcinfo.test_repr_traceback_and_excinfo"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_traceback_and_excinfo(self, importasmod):\n mod = importasmod(\n \"\"\"\n def f(x):\n raise ValueError(x)\n def entry():\n f(0)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n\n for style in (\"long\", \"short\"):\n p = FormattedExcinfo(style=style)\n reprtb = p.repr_traceback(excinfo)\n assert len(reprtb.reprentries) == 2\n assert reprtb.style == style\n assert not reprtb.extraline\n repr = p.repr_excinfo(excinfo)\n assert repr.reprtraceback\n assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)\n if sys.version_info[0] >= 3:\n assert repr.chain[0][0]\n assert len(repr.chain[0][0].reprentries) == len(reprtb.reprentries)\n assert repr.reprcrash.path.endswith(\"mod.py\")\n assert repr.reprcrash.message == \"ValueError: 0\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd.p_repr_traceback_excinfo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd_TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd.p_repr_traceback_excinfo_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 837, "end_line": 855, "span_ids": ["TestFormattedExcinfo.test_repr_traceback_with_invalid_cwd"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):\n mod = importasmod(\n \"\"\"\n def f(x):\n raise ValueError(x)\n def entry():\n f(0)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n\n p = FormattedExcinfo()\n\n def raiseos():\n raise OSError(2)\n\n monkeypatch.setattr(os, \"getcwd\", raiseos)\n assert p._makepath(__file__) == __file__\n p.repr_traceback(excinfo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_excinfo_addouterr_TestFormattedExcinfo.test_repr_excinfo_reprcrash.assert_str_repr_reprcrash": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_excinfo_addouterr_TestFormattedExcinfo.test_repr_excinfo_reprcrash.assert_str_repr_reprcrash", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 857, "end_line": 884, "span_ids": ["TestFormattedExcinfo.test_repr_excinfo_reprcrash", "TestFormattedExcinfo.test_repr_excinfo_addouterr"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_excinfo_addouterr(self, importasmod):\n mod = importasmod(\n \"\"\"\n def entry():\n raise ValueError()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n repr = excinfo.getrepr()\n repr.addsection(\"title\", \"content\")\n twmock = TWMock()\n repr.toterminal(twmock)\n assert twmock.lines[-1] == \"content\"\n assert twmock.lines[-2] == (\"-\", \"title\")\n\n def test_repr_excinfo_reprcrash(self, importasmod):\n mod = importasmod(\n \"\"\"\n def entry():\n raise ValueError()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n repr = excinfo.getrepr()\n assert repr.reprcrash.path.endswith(\"mod.py\")\n assert repr.reprcrash.lineno == 3\n assert repr.reprcrash.message == \"ValueError\"\n assert str(repr.reprcrash).endswith(\"mod.py:3: ValueError\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_recursion_TestFormattedExcinfo.test_repr_traceback_recursion.for_style_in_short_l.assert_str_reprtb_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_repr_traceback_recursion_TestFormattedExcinfo.test_repr_traceback_recursion.for_style_in_short_l.assert_str_reprtb_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 886, "end_line": 903, "span_ids": ["TestFormattedExcinfo.test_repr_traceback_recursion"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_repr_traceback_recursion(self, importasmod):\n mod = importasmod(\n \"\"\"\n def rec2(x):\n return rec1(x+1)\n def rec1(x):\n return rec2(x-1)\n def entry():\n rec1(42)\n \"\"\"\n )\n excinfo = pytest.raises(RuntimeError, mod.entry)\n\n for style in (\"short\", \"long\", \"no\"):\n p = FormattedExcinfo(style=\"short\")\n reprtb = p.repr_traceback(excinfo)\n assert reprtb.extraline == \"!!! Recursion detected (same locals & position)\"\n assert str(reprtb)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_reprexcinfo_getrepr_TestFormattedExcinfo.test_reprexcinfo_unicode.assert_x_u_\u044f_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_reprexcinfo_getrepr_TestFormattedExcinfo.test_reprexcinfo_unicode.assert_x_u_\u044f_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 905, "end_line": 935, "span_ids": ["TestFormattedExcinfo.test_reprexcinfo_unicode.MyRepr", "TestFormattedExcinfo.test_reprexcinfo_getrepr", "TestFormattedExcinfo.test_reprexcinfo_unicode", "TestFormattedExcinfo.test_reprexcinfo_unicode.MyRepr.toterminal"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_reprexcinfo_getrepr(self, importasmod):\n mod = importasmod(\n \"\"\"\n def f(x):\n raise ValueError(x)\n def entry():\n f(0)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.entry)\n\n for style in (\"short\", \"long\", \"no\"):\n for showlocals in (True, False):\n repr = excinfo.getrepr(style=style, showlocals=showlocals)\n if sys.version_info[0] < 3:\n assert isinstance(repr, ReprExceptionInfo)\n assert repr.reprtraceback.style == style\n if sys.version_info[0] >= 3:\n assert isinstance(repr, ExceptionChainRepr)\n for repr in repr.chain:\n assert repr[0].style == style\n\n def test_reprexcinfo_unicode(self):\n from _pytest._code.code import TerminalRepr\n\n class MyRepr(TerminalRepr):\n def toterminal(self, tw):\n tw.line(u\"\u044f\")\n\n x = six.text_type(MyRepr())\n assert x == u\"\u044f\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_TestFormattedExcinfo.test_toterminal_long.assert_tw_lines_12_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_TestFormattedExcinfo.test_toterminal_long.assert_tw_lines_12_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 937, "end_line": 967, "span_ids": ["TestFormattedExcinfo.test_toterminal_long"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_toterminal_long(self, importasmod):\n mod = importasmod(\n \"\"\"\n def g(x):\n raise ValueError(x)\n def f():\n g(3)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n excinfo.traceback = excinfo.traceback.filter()\n repr = excinfo.getrepr()\n tw = TWMock()\n repr.toterminal(tw)\n assert tw.lines[0] == \"\"\n tw.lines.pop(0)\n assert tw.lines[0] == \" def f():\"\n assert tw.lines[1] == \"> g(3)\"\n assert tw.lines[2] == \"\"\n line = tw.get_write_msg(3)\n assert line.endswith(\"mod.py\")\n assert tw.lines[4] == (\":5: \")\n assert tw.lines[5] == (\"_ \", None)\n assert tw.lines[6] == \"\"\n assert tw.lines[7] == \" def g(x):\"\n assert tw.lines[8] == \"> raise ValueError(x)\"\n assert tw.lines[9] == \"E ValueError: 3\"\n assert tw.lines[10] == \"\"\n line = tw.get_write_msg(11)\n assert line.endswith(\"mod.py\")\n assert tw.lines[12] == \":3: ValueError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_missing_source_TestFormattedExcinfo.test_toterminal_long_missing_source.assert_tw_lines_10_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_missing_source_TestFormattedExcinfo.test_toterminal_long_missing_source.assert_tw_lines_10_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 969, "end_line": 998, "span_ids": ["TestFormattedExcinfo.test_toterminal_long_missing_source"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_toterminal_long_missing_source(self, importasmod, tmpdir):\n mod = importasmod(\n \"\"\"\n def g(x):\n raise ValueError(x)\n def f():\n g(3)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n tmpdir.join(\"mod.py\").remove()\n excinfo.traceback = excinfo.traceback.filter()\n repr = excinfo.getrepr()\n tw = TWMock()\n repr.toterminal(tw)\n assert tw.lines[0] == \"\"\n tw.lines.pop(0)\n assert tw.lines[0] == \"> ???\"\n assert tw.lines[1] == \"\"\n line = tw.get_write_msg(2)\n assert line.endswith(\"mod.py\")\n assert tw.lines[3] == \":5: \"\n assert 
tw.lines[4] == (\"_ \", None)\n assert tw.lines[5] == \"\"\n assert tw.lines[6] == \"> ???\"\n assert tw.lines[7] == \"E ValueError: 3\"\n assert tw.lines[8] == \"\"\n line = tw.get_write_msg(9)\n assert line.endswith(\"mod.py\")\n assert tw.lines[10] == \":3: ValueError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_incomplete_source_TestFormattedExcinfo.test_toterminal_long_incomplete_source.assert_tw_lines_10_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_incomplete_source_TestFormattedExcinfo.test_toterminal_long_incomplete_source.assert_tw_lines_10_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1000, "end_line": 1029, "span_ids": ["TestFormattedExcinfo.test_toterminal_long_incomplete_source"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):\n mod = importasmod(\n \"\"\"\n def g(x):\n raise ValueError(x)\n def f():\n g(3)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n tmpdir.join(\"mod.py\").write(\"asdf\")\n excinfo.traceback = excinfo.traceback.filter()\n repr = excinfo.getrepr()\n tw = TWMock()\n repr.toterminal(tw)\n assert tw.lines[0] == \"\"\n tw.lines.pop(0)\n assert tw.lines[0] == \"> ???\"\n assert tw.lines[1] == \"\"\n line = tw.get_write_msg(2)\n assert line.endswith(\"mod.py\")\n assert tw.lines[3] == \":5: \"\n assert tw.lines[4] == (\"_ \", None)\n assert tw.lines[5] == \"\"\n assert tw.lines[6] == \"> ???\"\n assert tw.lines[7] == \"E ValueError: 3\"\n assert tw.lines[8] == \"\"\n line = tw.get_write_msg(9)\n assert line.endswith(\"mod.py\")\n assert tw.lines[10] == \":3: ValueError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_filenames_TestFormattedExcinfo.test_toterminal_long_filenames.try_.finally_.old_chdir_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_toterminal_long_filenames_TestFormattedExcinfo.test_toterminal_long_filenames.try_.finally_.old_chdir_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1031, "end_line": 1058, "span_ids": ["TestFormattedExcinfo.test_toterminal_long_filenames"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_toterminal_long_filenames(self, importasmod):\n mod = importasmod(\n \"\"\"\n def f():\n raise ValueError()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n tw = TWMock()\n path = py.path.local(mod.__file__)\n old = path.dirpath().chdir()\n try:\n repr = excinfo.getrepr(abspath=False)\n repr.toterminal(tw)\n x = py.path.local().bestrelpath(path)\n if len(x) < len(str(path)):\n msg = tw.get_write_msg(-2)\n assert msg == \"mod.py\"\n assert tw.lines[-1] == \":3: ValueError\"\n\n repr = excinfo.getrepr(abspath=True)\n repr.toterminal(tw)\n msg = tw.get_write_msg(-2)\n assert msg == path\n line = tw.lines[-1]\n assert line == \":3: ValueError\"\n finally:\n old.chdir()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_format_excinfo_TestFormattedExcinfo.test_format_excinfo.assert_tw_stringio_getval": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_format_excinfo_TestFormattedExcinfo.test_format_excinfo.assert_tw_stringio_getval", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1060, "end_line": 1088, "span_ids": ["TestFormattedExcinfo.test_format_excinfo"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n @pytest.mark.parametrize(\n \"reproptions\",\n [\n {\n \"style\": style,\n \"showlocals\": showlocals,\n \"funcargs\": funcargs,\n \"tbfilter\": tbfilter,\n }\n for style in (\"long\", \"short\", \"no\")\n for showlocals in (True, False)\n for tbfilter in (True, False)\n for funcargs in (True, False)\n ],\n )\n def test_format_excinfo(self, importasmod, reproptions):\n mod = importasmod(\n \"\"\"\n def g(x):\n raise ValueError(x)\n def f():\n g(3)\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n tw = py.io.TerminalWriter(stringio=True)\n repr = excinfo.getrepr(**reproptions)\n repr.toterminal(tw)\n assert tw.stringio.getvalue()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_repr_style_TestFormattedExcinfo.test_traceback_repr_style.assert_tw_lines_20_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_traceback_repr_style_TestFormattedExcinfo.test_traceback_repr_style.assert_tw_lines_20_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1090, "end_line": 1134, "span_ids": ["TestFormattedExcinfo.test_traceback_repr_style"], "tokens": 404}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n def test_traceback_repr_style(self, importasmod):\n mod = importasmod(\n \"\"\"\n def f():\n g()\n def g():\n h()\n def h():\n i()\n def i():\n raise ValueError()\n \"\"\"\n )\n excinfo = pytest.raises(ValueError, mod.f)\n excinfo.traceback = excinfo.traceback.filter()\n excinfo.traceback[1].set_repr_style(\"short\")\n excinfo.traceback[2].set_repr_style(\"short\")\n r = excinfo.getrepr(style=\"long\")\n tw = TWMock()\n r.toterminal(tw)\n for line in tw.lines:\n print(line)\n assert tw.lines[0] == \"\"\n assert tw.lines[1] == \" def f():\"\n assert tw.lines[2] == \"> g()\"\n assert tw.lines[3] == \"\"\n msg = tw.get_write_msg(4)\n assert msg.endswith(\"mod.py\")\n assert tw.lines[5] == \":3: \"\n assert tw.lines[6] == (\"_ \", None)\n tw.get_write_msg(7)\n assert tw.lines[8].endswith(\"in g\")\n assert tw.lines[9] == \" h()\"\n tw.get_write_msg(10)\n assert tw.lines[11].endswith(\"in h\")\n assert tw.lines[12] == \" i()\"\n assert tw.lines[13] == (\"_ \", None)\n assert tw.lines[14] == \"\"\n assert tw.lines[15] == \" def i():\"\n assert tw.lines[16] == \"> raise ValueError()\"\n assert tw.lines[17] == \"E ValueError\"\n assert tw.lines[18] == \"\"\n msg = tw.get_write_msg(19)\n msg.endswith(\"mod.py\")\n assert tw.lines[20] == \":9: ValueError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_TestFormattedExcinfo.test_exc_chain_repr.assert_tw_lines_47_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_TestFormattedExcinfo.test_exc_chain_repr.assert_tw_lines_47_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1136, "end_line": 1220, "span_ids": ["TestFormattedExcinfo.test_exc_chain_repr"], "tokens": 788}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] < 3\")\n def test_exc_chain_repr(self, importasmod):\n mod = importasmod(\n \"\"\"\n class Err(Exception):\n pass\n def f():\n try:\n g()\n except Exception as e:\n raise Err() from e\n finally:\n h()\n def g():\n raise ValueError()\n\n def h():\n raise AttributeError()\n \"\"\"\n )\n excinfo = pytest.raises(AttributeError, mod.f)\n r = excinfo.getrepr(style=\"long\")\n tw = TWMock()\n r.toterminal(tw)\n for line in tw.lines:\n print(line)\n assert tw.lines[0] == \"\"\n assert tw.lines[1] == \" def f():\"\n assert tw.lines[2] == \" try:\"\n assert tw.lines[3] == \"> g()\"\n assert tw.lines[4] == \"\"\n line = tw.get_write_msg(5)\n assert line.endswith(\"mod.py\")\n 
assert tw.lines[6] == \":6: \"\n assert tw.lines[7] == (\"_ \", None)\n assert tw.lines[8] == \"\"\n assert tw.lines[9] == \" def g():\"\n assert tw.lines[10] == \"> raise ValueError()\"\n assert tw.lines[11] == \"E ValueError\"\n assert tw.lines[12] == \"\"\n line = tw.get_write_msg(13)\n assert line.endswith(\"mod.py\")\n assert tw.lines[14] == \":12: ValueError\"\n assert tw.lines[15] == \"\"\n assert (\n tw.lines[16]\n == \"The above exception was the direct cause of the following exception:\"\n )\n assert tw.lines[17] == \"\"\n assert tw.lines[18] == \" def f():\"\n assert tw.lines[19] == \" try:\"\n assert tw.lines[20] == \" g()\"\n assert tw.lines[21] == \" except Exception as e:\"\n assert tw.lines[22] == \"> raise Err() from e\"\n assert tw.lines[23] == \"E test_exc_chain_repr0.mod.Err\"\n assert tw.lines[24] == \"\"\n line = tw.get_write_msg(25)\n assert line.endswith(\"mod.py\")\n assert tw.lines[26] == \":8: Err\"\n assert tw.lines[27] == \"\"\n assert (\n tw.lines[28]\n == \"During handling of the above exception, another exception occurred:\"\n )\n assert tw.lines[29] == \"\"\n assert tw.lines[30] == \" def f():\"\n assert tw.lines[31] == \" try:\"\n assert tw.lines[32] == \" g()\"\n assert tw.lines[33] == \" except Exception as e:\"\n assert tw.lines[34] == \" raise Err() from e\"\n assert tw.lines[35] == \" finally:\"\n assert tw.lines[36] == \"> h()\"\n assert tw.lines[37] == \"\"\n line = tw.get_write_msg(38)\n assert line.endswith(\"mod.py\")\n assert tw.lines[39] == \":10: \"\n assert tw.lines[40] == (\"_ \", None)\n assert tw.lines[41] == \"\"\n assert tw.lines[42] == \" def h():\"\n assert tw.lines[43] == \"> raise AttributeError()\"\n assert tw.lines[44] == \"E AttributeError\"\n assert tw.lines[45] == \"\"\n line = tw.get_write_msg(46)\n assert line.endswith(\"mod.py\")\n assert tw.lines[47] == \":15: AttributeError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_repr_chain_suppression_TestFormattedExcinfo.test_exc_repr_chain_suppression.assert_len_tw_lines_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_repr_chain_suppression_TestFormattedExcinfo.test_exc_repr_chain_suppression.assert_len_tw_lines_1", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1222, "end_line": 1262, "span_ids": ["TestFormattedExcinfo.test_exc_repr_chain_suppression"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] < 3\")\n @pytest.mark.parametrize(\"mode\", [\"from_none\", \"explicit_suppress\"])\n def test_exc_repr_chain_suppression(self, importasmod, mode):\n \"\"\"Check that exc repr does not show chained exceptions in Python 3.\n - When the exception is raised with \"from None\"\n - Explicitly suppressed with \"chain=False\" to ExceptionInfo.getrepr().\n \"\"\"\n raise_suffix = \" 
from None\" if mode == \"from_none\" else \"\"\n mod = importasmod(\n \"\"\"\n def f():\n try:\n g()\n except Exception:\n raise AttributeError(){raise_suffix}\n def g():\n raise ValueError()\n \"\"\".format(\n raise_suffix=raise_suffix\n )\n )\n excinfo = pytest.raises(AttributeError, mod.f)\n r = excinfo.getrepr(style=\"long\", chain=mode != \"explicit_suppress\")\n tw = TWMock()\n r.toterminal(tw)\n for line in tw.lines:\n print(line)\n assert tw.lines[0] == \"\"\n assert tw.lines[1] == \" def f():\"\n assert tw.lines[2] == \" try:\"\n assert tw.lines[3] == \" g()\"\n assert tw.lines[4] == \" except Exception:\"\n assert tw.lines[5] == \"> raise AttributeError(){}\".format(\n raise_suffix\n )\n assert tw.lines[6] == \"E AttributeError\"\n assert tw.lines[7] == \"\"\n line = tw.get_write_msg(8)\n assert line.endswith(\"mod.py\")\n assert tw.lines[9] == \":6: AttributeError\"\n assert len(tw.lines) == 10", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_without_traceback_TestFormattedExcinfo.test_exc_chain_repr_without_traceback.matcher_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_without_traceback_TestFormattedExcinfo.test_exc_chain_repr_without_traceback.matcher_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1264, "end_line": 1322, "span_ids": ["TestFormattedExcinfo.test_exc_chain_repr_without_traceback"], "tokens": 392}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] < 3\")\n @pytest.mark.parametrize(\n \"reason, description\",\n [\n (\n \"cause\",\n \"The above exception was the direct cause of the following exception:\",\n ),\n (\n \"context\",\n \"During handling of the above exception, another exception occurred:\",\n ),\n ],\n )\n def test_exc_chain_repr_without_traceback(self, importasmod, reason, description):\n \"\"\"\n Handle representation of exception chains where one of the exceptions doesn't have a\n real traceback, such as those raised in a subprocess submitted by the multiprocessing\n module (#1984).\n \"\"\"\n from _pytest.pytester import LineMatcher\n\n exc_handling_code = \" from e\" if reason == \"cause\" else \"\"\n mod = importasmod(\n \"\"\"\n def f():\n try:\n g()\n except Exception as e:\n raise RuntimeError('runtime problem'){exc_handling_code}\n def g():\n raise ValueError('invalid value')\n \"\"\".format(\n exc_handling_code=exc_handling_code\n )\n )\n\n with pytest.raises(RuntimeError) as excinfo:\n mod.f()\n\n # emulate the issue described in #1984\n attr = \"__%s__\" % reason\n getattr(excinfo.value, attr).__traceback__ = None\n\n r = excinfo.getrepr()\n tw = py.io.TerminalWriter(stringio=True)\n tw.hasmarkup = False\n r.toterminal(tw)\n\n matcher = LineMatcher(tw.stringio.getvalue().splitlines())\n 
matcher.fnmatch_lines(\n [\n \"ValueError: invalid value\",\n description,\n \"* except Exception as e:\",\n \"> * raise RuntimeError('runtime problem')\" + exc_handling_code,\n \"E *RuntimeError: runtime problem\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_cycle_TestFormattedExcinfo.test_exc_chain_repr_cycle.assert_out_expected_ou": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_TestFormattedExcinfo.test_exc_chain_repr_cycle_TestFormattedExcinfo.test_exc_chain_repr_cycle.assert_out_expected_ou", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1324, "end_line": 1366, "span_ids": ["TestFormattedExcinfo.test_exc_chain_repr_cycle"], "tokens": 313}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormattedExcinfo(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] < 3\")\n def test_exc_chain_repr_cycle(self, importasmod):\n mod = importasmod(\n \"\"\"\n class Err(Exception):\n pass\n def fail():\n return 0 / 0\n def reraise():\n try:\n fail()\n except ZeroDivisionError as e:\n raise Err() from e\n def unreraise():\n try:\n reraise()\n except Err as e:\n raise e.__cause__\n \"\"\"\n )\n excinfo = pytest.raises(ZeroDivisionError, mod.unreraise)\n r = excinfo.getrepr(style=\"short\")\n tw = TWMock()\n r.toterminal(tw)\n out = \"\\n\".join(line for line in tw.lines if isinstance(line, str))\n expected_out = textwrap.dedent(\n \"\"\"\\\n :13: in unreraise\n reraise()\n :10: in reraise\n raise Err() from e\n E test_exc_chain_repr_cycle0.mod.Err\n\n During handling of the above exception, another exception occurred:\n :15: in unreraise\n raise e.__cause__\n :8: in reraise\n fail()\n :5: in fail\n return 0 / 0\n E ZeroDivisionError: division by zero\"\"\"\n )\n assert out == expected_out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_repr_traceback_with_unicode_test_cwd_deleted.assert_INTERNALERROR_no": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_repr_traceback_with_unicode_test_cwd_deleted.assert_INTERNALERROR_no", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1369, "end_line": 1395, "span_ids": ["test_repr_traceback_with_unicode", "test_cwd_deleted"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"@pytest.mark.parametrize(\"style\", [\"short\", \"long\"])\n@pytest.mark.parametrize(\"encoding\", [None, \"utf8\", \"utf16\"])\ndef test_repr_traceback_with_unicode(style, encoding):\n msg = u\"\u2639\"\n if encoding is not None:\n msg = msg.encode(encoding)\n try:\n raise RuntimeError(msg)\n except RuntimeError:\n e_info = ExceptionInfo.from_current()\n formatter = FormattedExcinfo(style=style)\n repr_traceback = formatter.repr_traceback(e_info)\n assert repr_traceback is not None\n\n\ndef test_cwd_deleted(testdir):\n testdir.makepyfile(\n \"\"\"\n def test(tmpdir):\n tmpdir.chdir()\n tmpdir.remove()\n assert False\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 failed in *\"])\n assert \"INTERNALERROR\" not in result.stdout.str() + result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_exception_repr_extraction_error_on_recursion_test_exception_repr_extraction_error_on_recursion.matcher_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_exception_repr_extraction_error_on_recursion_test_exception_repr_extraction_error_on_recursion.matcher_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1398, "end_line": 1430, "span_ids": ["test_exception_repr_extraction_error_on_recursion.numpy_like.__eq__", "test_exception_repr_extraction_error_on_recursion.numpy_like", "test_exception_repr_extraction_error_on_recursion"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"limited_recursion_depth\")\ndef test_exception_repr_extraction_error_on_recursion():\n \"\"\"\n Ensure we can properly detect a recursion error even\n if some locals raise error on comparison (#2459).\n \"\"\"\n from _pytest.pytester import LineMatcher\n\n class numpy_like(object):\n def __eq__(self, other):\n if type(other) is numpy_like:\n raise ValueError(\n \"The truth value of an array \"\n \"with more than one element is ambiguous.\"\n )\n\n def a(x):\n return b(numpy_like())\n\n def b(x):\n return a(numpy_like())\n\n with pytest.raises(RuntimeError) as excinfo:\n a(numpy_like())\n\n matcher = LineMatcher(str(excinfo.getrepr()).splitlines())\n matcher.fnmatch_lines(\n [\n \"!!! 
Recursion error detected, but an error occurred locating the origin of recursion.\",\n \"*The following exception happened*\",\n \"*ValueError: The truth value of an array*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_no_recursion_index_on_recursion_error_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_excinfo.py_test_no_recursion_index_on_recursion_error_", "embedding": null, "metadata": {"file_path": "testing/code/test_excinfo.py", "file_name": "test_excinfo.py", "file_type": "text/x-python", "category": "test", "start_line": 1433, "end_line": 1447, "span_ids": ["test_no_recursion_index_on_recursion_error.RecursionDepthError.__getattr__", "test_no_recursion_index_on_recursion_error", "test_no_recursion_index_on_recursion_error.RecursionDepthError"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"limited_recursion_depth\")\ndef test_no_recursion_index_on_recursion_error():\n \"\"\"\n Ensure that we don't break in case we can't find the recursion index\n during a recursion error (#2486).\n \"\"\"\n\n class RecursionDepthError(object):\n def __getattr__(self, attr):\n return getattr(self, \"_\" + attr)\n\n with pytest.raises(RuntimeError) as excinfo:\n RecursionDepthError().trigger\n assert \"maximum recursion\" in str(excinfo.getrepr())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py__coding_utf_8__test_isparseable.assert_not_Source_chr_0_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py__coding_utf_8__test_isparseable.assert_not_Source_chr_0_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 136, "span_ids": ["test_syntaxerror_rerepresentation", "impl", "test_source_putaround", "test_source_from_method.TestClass.test_method", "test_unicode", "test_source_from_inner_function", "test_source_strip_multiline", "test_source_strips", "test_isparseable", "docstring", "test_source_from_function", "test_source_str_function", "test_source_from_method", "test_source_from_method.TestClass", "test_source_putaround_simple", "test_source_from_lines", "imports"], "tokens": 816}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\n# flake8: noqa\n# disable flake check on this file because some constructs are strange\n# or redundant on purpose and can't be disable on a line-by-line basis\nfrom __future__ import absolute_import\nfrom __future__ import 
division\nfrom __future__ import print_function\n\nimport ast\nimport inspect\nimport sys\n\nimport six\n\nimport _pytest._code\nimport pytest\nfrom _pytest._code import Source\n\nastonly = pytest.mark.nothing\nfailsonjython = pytest.mark.xfail(\"sys.platform.startswith('java')\")\n\n\ndef test_source_str_function():\n x = Source(\"3\")\n assert str(x) == \"3\"\n\n x = Source(\" 3\")\n assert str(x) == \"3\"\n\n x = Source(\n \"\"\"\n 3\n \"\"\"\n )\n assert str(x) == \"\\n3\"\n\n\ndef test_unicode():\n x = Source(u\"4\")\n assert str(x) == \"4\"\n co = _pytest._code.compile(u'u\"\u00e5\"', mode=\"eval\")\n val = eval(co)\n assert isinstance(val, six.text_type)\n\n\ndef test_source_from_function():\n source = _pytest._code.Source(test_source_str_function)\n assert str(source).startswith(\"def test_source_str_function():\")\n\n\ndef test_source_from_method():\n class TestClass(object):\n def test_method(self):\n pass\n\n source = _pytest._code.Source(TestClass().test_method)\n assert source.lines == [\"def test_method(self):\", \" pass\"]\n\n\ndef test_source_from_lines():\n lines = [\"a \\n\", \"b\\n\", \"c\"]\n source = _pytest._code.Source(lines)\n assert source.lines == [\"a \", \"b\", \"c\"]\n\n\ndef test_source_from_inner_function():\n def f():\n pass\n\n source = _pytest._code.Source(f, deindent=False)\n assert str(source).startswith(\" def f():\")\n source = _pytest._code.Source(f)\n assert str(source).startswith(\"def f():\")\n\n\ndef test_source_putaround_simple():\n source = Source(\"raise ValueError\")\n source = source.putaround(\n \"try:\",\n \"\"\"\\\n except ValueError:\n x = 42\n else:\n x = 23\"\"\",\n )\n assert (\n str(source)\n == \"\"\"\\\ntry:\n raise ValueError\nexcept ValueError:\n x = 42\nelse:\n x = 23\"\"\"\n )\n\n\ndef test_source_putaround():\n source = Source()\n source = source.putaround(\n \"\"\"\n if 1:\n x=1\n \"\"\"\n )\n assert str(source).strip() == \"if 1:\\n x=1\"\n\n\ndef test_source_strips():\n source = Source(\"\")\n assert source == Source()\n assert str(source) == \"\"\n assert source.strip() == source\n\n\ndef test_source_strip_multiline():\n source = Source()\n source.lines = [\"\", \" hello\", \" \"]\n source2 = source.strip()\n assert source2.lines == [\" hello\"]\n\n\ndef test_syntaxerror_rerepresentation():\n ex = pytest.raises(SyntaxError, _pytest._code.compile, \"xyz xyz\")\n assert ex.value.lineno == 1\n assert ex.value.offset in (4, 5, 7) # XXX pypy/jython versus cpython?\n assert ex.value.text.strip(), \"x x\"\n\n\ndef test_isparseable():\n assert Source(\"hello\").isparseable()\n assert Source(\"if 1:\\n pass\").isparseable()\n assert Source(\" \\nif 1:\\n pass\").isparseable()\n assert not Source(\"if 1:\\n\").isparseable()\n assert not Source(\" \\nif 1:\\npass\").isparseable()\n assert not Source(chr(0)).isparseable()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestAccesses_TestAccesses.test_iter.assert_len_values_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestAccesses_TestAccesses.test_iter.assert_len_values_4", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 163, "span_ids": ["TestAccesses.test_getrange", 
"TestAccesses.test_iter", "TestAccesses.test_len", "TestAccesses", "TestAccesses.test_getline"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAccesses(object):\n source = Source(\n \"\"\"\\\n def f(x):\n pass\n def g(x):\n pass\n \"\"\"\n )\n\n def test_getrange(self):\n x = self.source[0:2]\n assert x.isparseable()\n assert len(x.lines) == 2\n assert str(x) == \"def f(x):\\n pass\"\n\n def test_getline(self):\n x = self.source[0]\n assert x == \"def f(x):\"\n\n def test_len(self):\n assert len(self.source) == 4\n\n def test_iter(self):\n values = [x for x in self.source]\n assert len(values) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling_TestSourceParsingAndCompiling.test_getstatementrange_triple_quoted.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling_TestSourceParsingAndCompiling.test_getstatementrange_triple_quoted.None_1", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 227, "span_ids": ["TestSourceParsingAndCompiling.test_compile_and_getsource_through_same_function", "TestSourceParsingAndCompiling", "TestSourceParsingAndCompiling.test_compile_and_getsource_simple", "TestSourceParsingAndCompiling.test_compile", "TestSourceParsingAndCompiling.test_getstatementrange_triple_quoted", "TestSourceParsingAndCompiling.test_getstatement"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSourceParsingAndCompiling(object):\n source = Source(\n \"\"\"\\\n def f(x):\n assert (x ==\n 3 +\n 4)\n \"\"\"\n ).strip()\n\n def test_compile(self):\n co = _pytest._code.compile(\"x=3\")\n d = {}\n exec(co, d)\n assert d[\"x\"] == 3\n\n def test_compile_and_getsource_simple(self):\n co = _pytest._code.compile(\"x=3\")\n exec(co)\n source = _pytest._code.Source(co)\n assert str(source) == \"x=3\"\n\n def test_compile_and_getsource_through_same_function(self):\n def gensource(source):\n return _pytest._code.compile(source)\n\n co1 = gensource(\n \"\"\"\n def f():\n raise KeyError()\n \"\"\"\n )\n co2 = gensource(\n \"\"\"\n def f():\n raise ValueError()\n \"\"\"\n )\n source1 = inspect.getsource(co1)\n assert \"KeyError\" in source1\n source2 = inspect.getsource(co2)\n assert \"ValueError\" in source2\n\n def test_getstatement(self):\n # print str(self.source)\n ass = str(self.source[1:])\n for i in range(1, 4):\n # print \"trying start in line %r\" % self.source[i]\n s = self.source.getstatement(i)\n # x = s.deindent()\n assert str(s) == ass\n\n def test_getstatementrange_triple_quoted(self):\n # print str(self.source)\n source = Source(\n 
\"\"\"hello('''\n ''')\"\"\"\n )\n s = source.getstatement(0)\n assert s == str(source)\n s = source.getstatement(1)\n assert s == str(source)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs_TestSourceParsingAndCompiling.test_getstatementrange_within_constructs.None_4", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 230, "end_line": 251, "span_ids": ["TestSourceParsingAndCompiling.test_getstatementrange_within_constructs"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSourceParsingAndCompiling(object):\n\n @astonly\n def test_getstatementrange_within_constructs(self):\n source = Source(\n \"\"\"\\\n try:\n try:\n raise ValueError\n except SomeThing:\n pass\n finally:\n 42\n \"\"\"\n )\n assert len(source) == 7\n # check all lineno's that could occur in a traceback\n # assert source.getstatementrange(0) == (0, 7)\n # assert source.getstatementrange(1) == (1, 5)\n assert source.getstatementrange(2) == (2, 3)\n assert source.getstatementrange(3) == (3, 4)\n assert source.getstatementrange(4) == (4, 5)\n # assert source.getstatementrange(5) == (0, 7)\n assert source.getstatementrange(6) == (6, 7)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_bug_TestSourceParsingAndCompiling.test_compile_and_getsource.assert_str_stmt_strip_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_getstatementrange_bug_TestSourceParsingAndCompiling.test_compile_and_getsource.assert_str_stmt_strip_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 253, "end_line": 320, "span_ids": ["TestSourceParsingAndCompiling.test_getstatementrange_bug2", "TestSourceParsingAndCompiling.test_getstatementrange_ast_issue58", "TestSourceParsingAndCompiling.test_getstatementrange_out_of_bounds_py3", "TestSourceParsingAndCompiling.test_getstatementrange_bug", "TestSourceParsingAndCompiling.test_getstatementrange_with_syntaxerror_issue7", "TestSourceParsingAndCompiling.test_compile_to_ast", "TestSourceParsingAndCompiling.test_compile_and_getsource"], "tokens": 455}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSourceParsingAndCompiling(object):\n\n def test_getstatementrange_bug(self):\n source = Source(\n \"\"\"\\\n try:\n x = (\n y +\n z)\n except:\n pass\n \"\"\"\n )\n assert len(source) == 6\n assert source.getstatementrange(2) == (1, 4)\n\n def test_getstatementrange_bug2(self):\n source = Source(\n \"\"\"\\\n assert (\n 33\n ==\n [\n X(3,\n b=1, c=2\n ),\n ]\n )\n \"\"\"\n )\n assert len(source) == 9\n assert source.getstatementrange(5) == (0, 9)\n\n def test_getstatementrange_ast_issue58(self):\n source = Source(\n \"\"\"\\\n\n def test_some():\n for a in [a for a in\n CAUSE_ERROR]: pass\n\n x = 3\n \"\"\"\n )\n assert getstatement(2, source).lines == source.lines[2:3]\n assert getstatement(3, source).lines == source.lines[3:4]\n\n def test_getstatementrange_out_of_bounds_py3(self):\n source = Source(\"if xxx:\\n from .collections import something\")\n r = source.getstatementrange(1)\n assert r == (1, 2)\n\n def test_getstatementrange_with_syntaxerror_issue7(self):\n source = Source(\":\")\n pytest.raises(SyntaxError, lambda: source.getstatementrange(0))\n\n def test_compile_to_ast(self):\n source = Source(\"x = 4\")\n mod = source.compile(flag=ast.PyCF_ONLY_AST)\n assert isinstance(mod, ast.Module)\n compile(mod, \"\", \"exec\")\n\n def test_compile_and_getsource(self):\n co = self.source.compile()\n six.exec_(co, globals())\n f(7)\n excinfo = pytest.raises(AssertionError, f, 6)\n frame = excinfo.traceback[-1].frame\n stmt = frame.code.fullsource.getstatement(frame.lineno)\n assert str(stmt).strip().startswith(\"assert\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_compilefuncs_and_path_sanity_TestSourceParsingAndCompiling.test_offsetless_synerr.pytest_raises_SyntaxError": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestSourceParsingAndCompiling.test_compilefuncs_and_path_sanity_TestSourceParsingAndCompiling.test_offsetless_synerr.pytest_raises_SyntaxError", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 320, "end_line": 339, "span_ids": ["TestSourceParsingAndCompiling.test_compilefuncs_and_path_sanity", "TestSourceParsingAndCompiling.test_offsetless_synerr"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSourceParsingAndCompiling(object):\n\n @pytest.mark.parametrize(\"name\", [\"\", None, \"my\"])\n def test_compilefuncs_and_path_sanity(self, name):\n def check(comp, name):\n co = comp(self.source, name)\n if not name:\n expected = \"codegen %s:%d>\" % (mypath, mylineno + 2 + 2)\n else:\n expected = \"codegen %r %s:%d>\" % (name, mypath, mylineno + 2 + 2)\n fn = co.co_filename\n assert fn.endswith(expected)\n\n mycode = 
_pytest._code.Code(self.test_compilefuncs_and_path_sanity)\n mylineno = mycode.firstlineno\n mypath = mycode.path\n\n for comp in _pytest._code.compile, _pytest._code.Source.compile:\n check(comp, name)\n\n def test_offsetless_synerr(self):\n pytest.raises(SyntaxError, _pytest._code.compile, \"lambda a,a: 0\", mode=\"eval\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getstartingblock_singleline_test_deindent.assert_lines_def_f_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getstartingblock_singleline_test_deindent.assert_lines_def_f_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 410, "span_ids": ["test_getstartingblock_singleline", "test_getline_finally", "test_getstartingblock_singleline.A", "test_getfuncsource_dynamic", "test_deindent", "test_getfuncsource_with_multine_string", "test_getstartingblock_singleline.A.__init__"], "tokens": 385}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getstartingblock_singleline():\n class A(object):\n def __init__(self, *args):\n frame = sys._getframe(1)\n self.source = _pytest._code.Frame(frame).statement\n\n x = A(\"x\", \"y\")\n\n values = [i for i in x.source.lines if i.strip()]\n assert len(values) == 1\n\n\ndef test_getline_finally():\n def c():\n pass\n\n with pytest.raises(TypeError) as excinfo:\n teardown = None\n try:\n c(1)\n finally:\n if teardown:\n teardown()\n source = excinfo.traceback[-1].statement\n assert str(source).strip() == \"c(1)\"\n\n\ndef test_getfuncsource_dynamic():\n source = \"\"\"\n def f():\n raise ValueError\n\n def g(): pass\n \"\"\"\n co = _pytest._code.compile(source)\n six.exec_(co, globals())\n assert str(_pytest._code.Source(f)).strip() == \"def f():\\n raise ValueError\"\n assert str(_pytest._code.Source(g)).strip() == \"def g(): pass\"\n\n\ndef test_getfuncsource_with_multine_string():\n def f():\n c = \"\"\"while True:\n pass\n\"\"\"\n\n expected = '''\\\n def f():\n c = \"\"\"while True:\n pass\n\"\"\"\n'''\n assert str(_pytest._code.Source(f)) == expected.rstrip()\n\n\ndef test_deindent():\n from _pytest._code.source import deindent as deindent\n\n assert deindent([\"\\tfoo\", \"\\tbar\"]) == [\"foo\", \"bar\"]\n\n source = \"\"\"\\\n def f():\n def g():\n pass\n \"\"\"\n lines = deindent(source.splitlines())\n assert lines == [\"def f():\", \" def g():\", \" pass\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_source_of_class_at_eof_without_newline_test_source_of_class_at_eof_without_newline.assert_str_source_strip_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_source_of_class_at_eof_without_newline_test_source_of_class_at_eof_without_newline.assert_str_source_strip_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 411, "end_line": 424, "span_ids": ["test_source_of_class_at_eof_without_newline"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_source_of_class_at_eof_without_newline(tmpdir, _sys_snapshot):\n # this test fails because the implicit inspect.getsource(A) below\n # does not return the \"x = 1\" last line.\n source = _pytest._code.Source(\n \"\"\"\n class A(object):\n def method(self):\n x = 1\n \"\"\"\n )\n path = tmpdir.join(\"a.py\")\n path.write(source)\n s2 = _pytest._code.Source(tmpdir.join(\"a.py\").pyimport().A)\n assert str(source).strip() == str(s2).strip()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_if_True__test_findsource.assert_src_lineno_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_if_True__test_findsource.assert_src_lineno_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 429, "end_line": 478, "span_ids": ["test_getsource_fallback", "test_findsource", "test_findsource_fallback", "impl:5", "test_idem_compile_and_getsource"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "if True:\n\n def x():\n pass\n\n\ndef test_getsource_fallback():\n from _pytest._code.source import getsource\n\n expected = \"\"\"def x():\n pass\"\"\"\n src = getsource(x)\n assert src == expected\n\n\ndef test_idem_compile_and_getsource():\n from _pytest._code.source import getsource\n\n expected = \"def x(): pass\"\n co = _pytest._code.compile(expected)\n src = getsource(co)\n assert src == expected\n\n\ndef test_findsource_fallback():\n from _pytest._code.source import findsource\n\n src, lineno = findsource(x)\n assert \"test_findsource_simple\" in str(src)\n assert src[lineno] == \" def x():\"\n\n\ndef test_findsource():\n from _pytest._code.source import findsource\n\n co = _pytest._code.compile(\n \"\"\"if 1:\n def x():\n pass\n\"\"\"\n )\n\n src, lineno = findsource(co)\n assert \"if 1:\" in str(src)\n\n d = {}\n eval(co, d)\n src, lineno = findsource(d[\"x\"])\n assert \"if 1:\" in str(src)\n assert src[lineno] == \" def x():\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getfslineno_test_getfslineno.assert_getfslineno_B_1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_getfslineno_test_getfslineno.assert_getfslineno_B_1_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 479, "end_line": 505, "span_ids": ["test_getfslineno.A:2", "test_getfslineno.B", "test_getfslineno.B:2", "test_getfslineno.A", "test_getfslineno"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getfslineno():\n from _pytest._code import getfslineno\n\n def f(x):\n pass\n\n fspath, lineno = getfslineno(f)\n\n assert fspath.basename == \"test_source.py\"\n assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource\n\n class A(object):\n pass\n\n fspath, lineno = getfslineno(A)\n\n _, A_lineno = inspect.findsource(A)\n assert fspath.basename == \"test_source.py\"\n assert lineno == A_lineno\n\n assert getfslineno(3) == (\"\", -1)\n\n class B(object):\n pass\n\n B.__name__ = \"B2\"\n assert getfslineno(B)[1] == -1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_code_of_object_instance_with_call_test_oneline_and_comment.assert_str_source_ra": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_code_of_object_instance_with_call_test_oneline_and_comment.assert_str_source_ra", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 508, "end_line": 557, "span_ids": ["test_code_of_object_instance_with_call.WithCall.__call__", "test_oneline", "test_code_of_object_instance_with_call", "getstatement", "test_oneline_and_comment", "test_code_of_object_instance_with_call.WithCall", "test_code_of_object_instance_with_call.Hello.__call__", "test_code_of_object_instance_with_call.Hello", "test_comment_and_no_newline_at_end", "test_code_of_object_instance_with_call.A", "test_code_of_object_instance_with_call.A:2"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_code_of_object_instance_with_call():\n class A(object):\n pass\n\n pytest.raises(TypeError, lambda: _pytest._code.Source(A()))\n\n class WithCall(object):\n def __call__(self):\n pass\n\n code = _pytest._code.Code(WithCall())\n assert \"pass\" in str(code.source())\n\n class Hello(object):\n def __call__(self):\n pass\n\n pytest.raises(TypeError, lambda: _pytest._code.Code(Hello))\n\n\ndef getstatement(lineno, source):\n from _pytest._code.source import getstatementrange_ast\n\n source = _pytest._code.Source(source, 
deindent=False)\n ast, start, end = getstatementrange_ast(lineno, source)\n return source[start:end]\n\n\ndef test_oneline():\n source = getstatement(0, \"raise ValueError\")\n assert str(source) == \"raise ValueError\"\n\n\ndef test_comment_and_no_newline_at_end():\n from _pytest._code.source import getstatementrange_ast\n\n source = Source(\n [\n \"def test_basic_complex():\",\n \" assert 1 == 2\",\n \"# vim: filetype=pyopencl:fdm=marker\",\n ]\n )\n ast, start, end = getstatementrange_ast(1, source)\n assert end == 2\n\n\ndef test_oneline_and_comment():\n source = getstatement(0, \"raise ValueError\\n#hello\")\n assert str(source) == \"raise ValueError\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comments_test_comments.for_line_in_range_tqs_sta.assert_str_getstatement_l": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comments_test_comments.for_line_in_range_tqs_sta.assert_str_getstatement_l", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 560, "end_line": 583, "span_ids": ["test_comments"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_comments():\n source = '''def test():\n \"comment 1\"\n x = 1\n # comment 2\n # comment 3\n\n assert False\n\n\"\"\"\ncomment 4\n\"\"\"\n'''\n for line in range(2, 6):\n assert str(getstatement(line, source)) == \" x = 1\"\n if sys.version_info >= (3, 8) or hasattr(sys, \"pypy_version_info\"):\n tqs_start = 8\n else:\n tqs_start = 10\n assert str(getstatement(10, source)) == '\"\"\"'\n for line in range(6, tqs_start):\n assert str(getstatement(line, source)) == \" assert False\"\n for line in range(tqs_start, 10):\n assert str(getstatement(line, source)) == '\"\"\"\\ncomment 4\\n\"\"\"'", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comment_in_statement_test_multiline.assert_str_source_ra": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_comment_in_statement_test_multiline.assert_str_source_ra", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 627, "span_ids": ["test_single_line_else", "test_comment_in_statement", "test_multiline", "test_single_line_finally", "test_issue55"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_comment_in_statement():\n source = \"\"\"test(foo=1,\n # 
comment 1\n bar=2)\n\"\"\"\n for line in range(1, 3):\n assert (\n str(getstatement(line, source))\n == \"test(foo=1,\\n # comment 1\\n bar=2)\"\n )\n\n\ndef test_single_line_else():\n source = getstatement(1, \"if False: 2\\nelse: 3\")\n assert str(source) == \"else: 3\"\n\n\ndef test_single_line_finally():\n source = getstatement(1, \"try: 1\\nfinally: 3\")\n assert str(source) == \"finally: 3\"\n\n\ndef test_issue55():\n source = (\n \"def round_trip(dinp):\\n assert 1 == dinp\\n\"\n 'def test_rt():\\n round_trip(\"\"\"\\n\"\"\")\\n'\n )\n s = getstatement(3, source)\n assert str(s) == ' round_trip(\"\"\"\\n\"\"\")'\n\n\ndef test_multiline():\n source = getstatement(\n 0,\n \"\"\"\\\nraise ValueError(\n 23\n)\nx = 3\n\"\"\",\n )\n assert str(source) == \"raise ValueError(\\n 23\\n)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestTry_TestTryFinally.test_finally.assert_str_source_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestTry_TestTryFinally.test_finally.assert_str_source_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 632, "end_line": 674, "span_ids": ["TestTryFinally.test_finally", "TestTry", "TestTry.test_else", "TestTryFinally.test_body", "TestTry.test_except_line", "TestTryFinally", "TestTry.test_body", "TestTry.test_except_body"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTry(object):\n pytestmark = astonly\n source = \"\"\"\\\ntry:\n raise ValueError\nexcept Something:\n raise IndexError(1)\nelse:\n raise KeyError()\n\"\"\"\n\n def test_body(self):\n source = getstatement(1, self.source)\n assert str(source) == \" raise ValueError\"\n\n def test_except_line(self):\n source = getstatement(2, self.source)\n assert str(source) == \"except Something:\"\n\n def test_except_body(self):\n source = getstatement(3, self.source)\n assert str(source) == \" raise IndexError(1)\"\n\n def test_else(self):\n source = getstatement(5, self.source)\n assert str(source) == \" raise KeyError()\"\n\n\nclass TestTryFinally(object):\n source = \"\"\"\\\ntry:\n raise ValueError\nfinally:\n raise IndexError(1)\n\"\"\"\n\n def test_body(self):\n source = getstatement(1, self.source)\n assert str(source) == \" raise ValueError\"\n\n def test_finally(self):\n source = getstatement(3, self.source)\n assert str(source) == \" raise IndexError(1)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestIf_TestIf.test_else.assert_str_source_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_TestIf_TestIf.test_else.assert_str_source_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": 
"text/x-python", "category": "test", "start_line": 677, "end_line": 702, "span_ids": ["TestIf.test_elif", "TestIf.test_elif_clause", "TestIf.test_else", "TestIf", "TestIf.test_body"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestIf(object):\n pytestmark = astonly\n source = \"\"\"\\\nif 1:\n y = 3\nelif False:\n y = 5\nelse:\n y = 7\n\"\"\"\n\n def test_body(self):\n source = getstatement(1, self.source)\n assert str(source) == \" y = 3\"\n\n def test_elif_clause(self):\n source = getstatement(2, self.source)\n assert str(source) == \"elif False:\"\n\n def test_elif(self):\n source = getstatement(3, self.source)\n assert str(source) == \" y = 5\"\n\n def test_else(self):\n source = getstatement(5, self.source)\n assert str(source) == \" y = 7\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_semicolon_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/code/test_source.py_test_semicolon_", "embedding": null, "metadata": {"file_path": "testing/code/test_source.py", "file_name": "test_source.py", "file_type": "text/x-python", "category": "test", "start_line": 701, "end_line": 743, "span_ids": ["test_getstartingblock_multiline.A.__init__", "test_getstartingblock_multiline", "test_getstartingblock_multiline.A", "test_semicolon", "XXX_test_expression_multiline", "test_def_online"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_semicolon():\n s = \"\"\"\\\nhello ; pytest.skip()\n\"\"\"\n source = getstatement(0, s)\n assert str(source) == s.strip()\n\n\ndef test_def_online():\n s = \"\"\"\\\ndef func(): raise ValueError(42)\n\ndef something():\n pass\n\"\"\"\n source = getstatement(0, s)\n assert str(source) == \"def func(): raise ValueError(42)\"\n\n\ndef XXX_test_expression_multiline():\n source = \"\"\"\\\nsomething\n'''\n'''\"\"\"\n result = getstatement(1, source)\n assert str(result) == \"'''\\n'''\"\n\n\ndef test_getstartingblock_multiline():\n class A(object):\n def __init__(self, *args):\n frame = sys._getframe(1)\n self.source = _pytest._code.Frame(frame).statement\n\n # fmt: off\n x = A('x',\n 'y'\n ,\n 'z')\n # fmt: on\n values = [i for i in x.source.lines if i.strip()]\n assert len(values) == 4", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/conftest.py_pytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/conftest.py_pytest_", "embedding": null, "metadata": {"file_path": "testing/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 37, 
"span_ids": ["imports", "pytest_collection_modifyitems"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.hookimpl(hookwrapper=True, tryfirst=True)\ndef pytest_collection_modifyitems(config, items):\n \"\"\"Prefer faster tests.\n\n Use a hookwrapper to do this in the beginning, so e.g. --ff still works\n correctly.\n \"\"\"\n fast_items = []\n slow_items = []\n neutral_items = []\n\n slow_fixturenames = (\"testdir\",)\n\n for item in items:\n try:\n fixtures = item.fixturenames\n except AttributeError:\n # doctest at least\n # (https://github.com/pytest-dev/pytest/issues/5070)\n neutral_items.append(item)\n else:\n if any(x for x in fixtures if x in slow_fixturenames):\n slow_items.append(item)\n else:\n marker = item.get_closest_marker(\"slow\")\n if marker:\n slow_items.append(item)\n else:\n fast_items.append(item)\n\n items[:] = fast_items + neutral_items + slow_items\n\n yield", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_from___future___import_ab_test_getfuncargvalue_is_deprecated.pytest_deprecated_call_re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_from___future___import_ab_test_getfuncargvalue_is_deprecated.pytest_deprecated_call_re", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["impl", "test_pytest_setup_cfg_unsupported", "test_getfuncargvalue_is_deprecated", "imports", "test_pytest_custom_cfg_unsupported"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport pytest\nfrom _pytest.warning_types import PytestDeprecationWarning\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\npytestmark = pytest.mark.pytester_example_path(\"deprecated\")\n\n\ndef test_pytest_setup_cfg_unsupported(testdir):\n testdir.makefile(\n \".cfg\",\n setup=\"\"\"\n [pytest]\n addopts = --verbose\n \"\"\",\n )\n with pytest.raises(pytest.fail.Exception):\n testdir.runpytest()\n\n\ndef test_pytest_custom_cfg_unsupported(testdir):\n testdir.makefile(\n \".cfg\",\n custom=\"\"\"\n [pytest]\n addopts = --verbose\n \"\"\",\n )\n with pytest.raises(pytest.fail.Exception):\n testdir.runpytest(\"-c\", \"custom.cfg\")\n\n\ndef test_getfuncargvalue_is_deprecated(request):\n pytest.deprecated_call(request.getfuncargvalue, \"tmpdir\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_resultlog_is_deprecated_test_resultlog_is_deprecated.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_resultlog_is_deprecated_test_resultlog_is_deprecated.None_2", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 59, "span_ids": ["test_resultlog_is_deprecated"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_resultlog_is_deprecated(testdir):\n result = testdir.runpytest(\"--help\")\n result.stdout.fnmatch_lines([\"*DEPRECATED path for machine-readable result log*\"])\n\n testdir.makepyfile(\n \"\"\"\n def test():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--result-log=%s\" % testdir.tmpdir.join(\"result.log\"))\n result.stdout.fnmatch_lines(\n [\n \"*--result-log is deprecated and scheduled for removal in pytest 5.0*\",\n \"*See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_terminal_reporter_writer_attr_test_pytest_catchlog_deprecated.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_terminal_reporter_writer_attr_test_pytest_catchlog_deprecated.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 91, "span_ids": ["test_pytest_catchlog_deprecated", "test_terminal_reporter_writer_attr"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_terminal_reporter_writer_attr(pytestconfig):\n \"\"\"Check that TerminalReporter._tw is also available as 'writer' (#2984)\n This attribute is planned to be deprecated in 3.4.\n \"\"\"\n try:\n import xdist # noqa\n\n pytest.skip(\"xdist workers disable the terminal reporter plugin\")\n except ImportError:\n pass\n terminal_reporter = pytestconfig.pluginmanager.get_plugin(\"terminalreporter\")\n assert terminal_reporter.writer is terminal_reporter._tw\n\n\n@pytest.mark.parametrize(\"plugin\", [\"catchlog\", \"capturelog\"])\n@pytest.mark.filterwarnings(\"default\")\ndef test_pytest_catchlog_deprecated(testdir, plugin):\n testdir.makepyfile(\n \"\"\"\n def test_func(pytestconfig):\n pytestconfig.pluginmanager.register(None, 'pytest_{}')\n \"\"\".format(\n plugin\n )\n )\n res = testdir.runpytest()\n assert res.ret == 0\n res.stdout.fnmatch_lines(\n [\"*pytest-*log plugin has been merged into the 
core*\", \"*1 passed, 1 warnings*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_raises_message_argument_deprecated_test_pytest_plugins_in_non_top_level_conftest_deprecated.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_raises_message_argument_deprecated_test_pytest_plugins_in_non_top_level_conftest_deprecated.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 94, "end_line": 121, "span_ids": ["test_pytest_plugins_in_non_top_level_conftest_deprecated", "test_raises_message_argument_deprecated"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_raises_message_argument_deprecated():\n with pytest.warns(pytest.PytestDeprecationWarning):\n with pytest.raises(RuntimeError, message=\"foobar\"):\n raise RuntimeError\n\n\ndef test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir):\n from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST\n\n testdir.makepyfile(\n **{\n \"subdirectory/conftest.py\": \"\"\"\n pytest_plugins=['capture']\n \"\"\"\n }\n )\n testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n res = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)\n assert res.ret == 2\n msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]\n res.stdout.fnmatch_lines(\n [\"*{msg}*\".format(msg=msg), \"*subdirectory{sep}conftest.py*\".format(sep=os.sep)]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs.if_use_pyargs_.else_.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs_test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs.if_use_pyargs_.else_.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 150, "span_ids": ["test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"use_pyargs\", [True, False])\ndef test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(\n testdir, use_pyargs\n):\n 
\"\"\"When using --pyargs, do not emit the warning about non-top-level conftest warnings (#4039, #4044)\"\"\"\n from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST\n\n files = {\n \"src/pkg/__init__.py\": \"\",\n \"src/pkg/conftest.py\": \"\",\n \"src/pkg/test_root.py\": \"def test(): pass\",\n \"src/pkg/sub/__init__.py\": \"\",\n \"src/pkg/sub/conftest.py\": \"pytest_plugins=['capture']\",\n \"src/pkg/sub/test_bar.py\": \"def test(): pass\",\n }\n testdir.makepyfile(**files)\n testdir.syspathinsert(testdir.tmpdir.join(\"src\"))\n\n args = (\"--pyargs\", \"pkg\") if use_pyargs else ()\n args += (SHOW_PYTEST_WARNINGS_ARG,)\n res = testdir.runpytest(*args)\n assert res.ret == (0 if use_pyargs else 2)\n msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]\n if use_pyargs:\n assert msg not in res.stdout.str()\n else:\n res.stdout.fnmatch_lines([\"*{msg}*\".format(msg=msg)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 179, "span_ids": ["test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(\n testdir\n):\n from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST\n\n subdirectory = testdir.tmpdir.join(\"subdirectory\")\n subdirectory.mkdir()\n testdir.makeconftest(\n \"\"\"\n pytest_plugins=['capture']\n \"\"\"\n )\n testdir.tmpdir.join(\"conftest.py\").move(subdirectory.join(\"conftest.py\"))\n\n testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n\n res = testdir.runpytest_subprocess()\n assert res.ret == 2\n msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]\n res.stdout.fnmatch_lines(\n [\"*{msg}*\".format(msg=msg), \"*subdirectory{sep}conftest.py*\".format(sep=os.sep)]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives.assert_msg_not_in_res_std": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives_test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives.assert_msg_not_in_res_std", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 212, "span_ids": ["test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives(\n testdir\n):\n from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST\n\n subdirectory = testdir.tmpdir.join(\"subdirectory\")\n subdirectory.mkdir()\n testdir.makeconftest(\n \"\"\"\n pass\n \"\"\"\n )\n testdir.tmpdir.join(\"conftest.py\").move(subdirectory.join(\"conftest.py\"))\n\n testdir.makeconftest(\n \"\"\"\n import warnings\n warnings.filterwarnings('always', category=DeprecationWarning)\n pytest_plugins=['capture']\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n res = testdir.runpytest_subprocess()\n assert res.ret == 0\n msg = str(PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST).splitlines()[0]\n assert msg not in res.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_fixture_named_request_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/deprecated_test.py_test_fixture_named_request_", "embedding": null, "metadata": {"file_path": "testing/deprecated_test.py", "file_name": "deprecated_test.py", "file_type": "text/x-python", "category": "test", "start_line": 215, "end_line": 231, "span_ids": ["test_pytest_warns_unknown_kwargs", "test_fixture_named_request"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_named_request(testdir):\n testdir.copy_example()\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*'request' is a reserved name for fixtures and will raise an error in future versions\"\n ]\n )\n\n\ndef test_pytest_warns_unknown_kwargs():\n with pytest.warns(\n PytestDeprecationWarning,\n match=r\"pytest.warns\\(\\) got unexpected keyword arguments: \\['foo'\\]\",\n ):\n pytest.warns(UserWarning, foo=\"hello\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/acceptance/fixture_mock_integration.py__": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/acceptance/fixture_mock_integration.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/acceptance/fixture_mock_integration.py", "file_name": "fixture_mock_integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 21, "span_ids": ["impl", "my_fixture", "docstring", "impl:2", "impl:3", "imports:2", "imports", "test_foobar"], "tokens": 89}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Reproduces issue #3774\"\"\"\n\ntry:\n import mock\nexcept ImportError:\n import unittest.mock as mock\n\nimport pytest\n\nconfig = {\"mykey\": \"ORIGINAL\"}\n\n\n@pytest.fixture(scope=\"function\")\n@mock.patch.dict(config, {\"mykey\": \"MOCKED\"})\ndef my_fixture():\n return config[\"mykey\"]\n\n\ndef test_foobar(my_fixture):\n assert my_fixture == \"MOCKED\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/__init__.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/__init__.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/collect/collect_init_tests/tests/__init__.py", "file_name": "__init__.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_init"], "tokens": 6}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_init():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/test_foo.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/collect_init_tests/tests/test_foo.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/collect/collect_init_tests/tests/test_foo.py", "file_name": "test_foo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_foo"], "tokens": 7}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foo():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/collect/package_infinite_recursion/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 3, "span_ids": ["pytest_ignore_collect"], "tokens": 9}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_ignore_collect(path):\n return False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/tests/test_basic.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_infinite_recursion/tests/test_basic.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/collect/package_infinite_recursion/tests/test_basic.py", "file_name": "test_basic.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test"], "tokens": 5}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_init_given_as_arg/pkg/test_foo.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/collect/package_init_given_as_arg/pkg/test_foo.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/collect/package_init_given_as_arg/pkg/test_foo.py", "file_name": "test_foo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test"], "tokens": 5}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/conftest.py__", "embedding": null, "metadata": {"file_path": 
"testing/example_scripts/config/collect_pytest_prefix/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 3, "span_ids": ["pytest_something"], "tokens": 8}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class pytest_something(object):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/test_foo.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/config/collect_pytest_prefix/test_foo.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/config/collect_pytest_prefix/test_foo.py", "file_name": "test_foo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_foo"], "tokens": 7}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_foo():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/conftest_usageerror/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/conftest_usageerror/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/conftest_usageerror/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 9, "span_ids": ["pytest_unconfigure", "pytest_configure"], "tokens": 31}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_configure(config):\n import pytest\n\n raise pytest.UsageError(\"hello\")\n\n\ndef pytest_unconfigure(config):\n print(\"pytest_unconfigure_called\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/dataclasses/test_compare_dataclasses.py", "file_name": "test_compare_dataclasses.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 15, "span_ids": ["test_dataclasses.SimpleDataObject:2", 
"imports", "test_dataclasses", "test_dataclasses.SimpleDataObject"], "tokens": 75}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dataclasses import dataclass\nfrom dataclasses import field\n\n\ndef test_dataclasses():\n @dataclass\n class SimpleDataObject(object):\n field_a: int = field()\n field_b: int = field()\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"c\")\n\n assert left == right", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py", "file_name": "test_compare_dataclasses_field_comparison_off.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 15, "span_ids": ["imports", "test_dataclasses_with_attribute_comparison_off.SimpleDataObject:2", "test_dataclasses_with_attribute_comparison_off.SimpleDataObject", "test_dataclasses_with_attribute_comparison_off"], "tokens": 81}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dataclasses import dataclass\nfrom dataclasses import field\n\n\ndef test_dataclasses_with_attribute_comparison_off():\n @dataclass\n class SimpleDataObject(object):\n field_a: int = field()\n field_b: int = field(compare=False)\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"c\")\n\n assert left == right", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py", "file_name": "test_compare_dataclasses_verbose.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 15, "span_ids": ["imports", "test_dataclasses_verbose.SimpleDataObject", "test_dataclasses_verbose.SimpleDataObject:2", "test_dataclasses_verbose"], "tokens": 76}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dataclasses import 
dataclass\nfrom dataclasses import field\n\n\ndef test_dataclasses_verbose():\n @dataclass\n class SimpleDataObject(object):\n field_a: int = field()\n field_b: int = field()\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"c\")\n\n assert left == right", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py_from_dataclasses_import_d_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py_from_dataclasses_import_d_", "embedding": null, "metadata": {"file_path": "testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py", "file_name": "test_compare_two_different_dataclasses.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["test_comparing_two_different_data_classes.SimpleDataObjectTwo", "test_comparing_two_different_data_classes.SimpleDataObjectTwo:2", "test_comparing_two_different_data_classes.SimpleDataObjectOne:2", "test_comparing_two_different_data_classes", "test_comparing_two_different_data_classes.SimpleDataObjectOne", "imports"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from dataclasses import dataclass\nfrom dataclasses import field\n\n\ndef test_comparing_two_different_data_classes():\n @dataclass\n class SimpleDataObjectOne(object):\n field_a: int = field()\n field_b: int = field()\n\n @dataclass\n class SimpleDataObjectTwo(object):\n field_a: int = field()\n field_b: int = field()\n\n left = SimpleDataObjectOne(1, \"b\")\n right = SimpleDataObjectTwo(1, \"c\")\n\n assert left != right", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/deprecated/test_fixture_named_request.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/deprecated/test_fixture_named_request.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/deprecated/test_fixture_named_request.py", "file_name": "test_fixture_named_request.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 11, "span_ids": ["imports", "request", "test"], "tokens": 17}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef request():\n pass\n\n\ndef test():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/custom_item/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 11, "span_ids": ["imports", "CustomItem", "CustomItem.runtest", "pytest_collect_file"], "tokens": 38}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\nclass CustomItem(pytest.Item, pytest.File):\n def runtest(self):\n pass\n\n\ndef pytest_collect_file(path, parent):\n return CustomItem(path, parent)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/foo/test_foo.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/custom_item/foo/test_foo.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/custom_item/foo/test_foo.py", "file_name": "test_foo.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test"], "tokens": 5}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 8, "span_ids": ["imports", "arg1"], "tokens": 26}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef arg1(request):\n with pytest.raises(Exception):\n request.getfixturevalue(\"arg2\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/test_in_sub1.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/test_in_sub1.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub1/test_in_sub1.py", "file_name": "test_in_sub1.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_1"], "tokens": 9}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_1(arg1):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["arg2", "imports"], "tokens": 25}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef arg2(request):\n pytest.raises(Exception, request.getfixturevalue, \"arg1\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/test_in_sub2.py", "file_name": "test_in_sub2.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_2"], "tokens": 9}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_2(arg2):\n pass", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_detect_recursive_dependency_error.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_detect_recursive_dependency_error.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_detect_recursive_dependency_error.py", "file_name": "test_detect_recursive_dependency_error.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 16, "span_ids": ["imports", "test", "fix1", "fix2"], "tokens": 41}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef fix1(fix2):\n return 1\n\n\n@pytest.fixture\ndef fix2(fix1):\n return 1\n\n\ndef test(fix1):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["imports", "spam"], "tokens": 14}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef spam():\n return \"spam\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["imports", "spam"], "tokens": 17}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "import pytest\n\n\n@pytest.fixture\ndef spam(spam):\n return spam * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/test_spam.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/test_spam.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_conftest/pkg/test_spam.py", "file_name": "test_spam.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_spam"], "tokens": 15}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_spam(spam):\n assert spam == \"spamspam\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["imports", "spam"], "tokens": 14}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef spam():\n return \"spam\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/test_extend_fixture_conftest_module.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/test_extend_fixture_conftest_module.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_conftest_module/test_extend_fixture_conftest_module.py", "file_name": "test_extend_fixture_conftest_module.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 11, "span_ids": ["imports", "test_spam", "spam"], "tokens": 33}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef spam(spam):\n return spam * 2\n\n\ndef test_spam(spam):\n assert spam == \"spamspam\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_module_class.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_module_class.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_extend_fixture_module_class.py", "file_name": "test_extend_fixture_module_class.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 16, "span_ids": ["TestSpam.spam", "spam", "TestSpam.test_spam", "imports", "TestSpam"], "tokens": 56}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef spam():\n return \"spam\"\n\n\nclass TestSpam(object):\n @pytest.fixture\n def spam(self, spam):\n return spam * 2\n\n def test_spam(self, spam):\n assert spam == \"spamspam\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_basic.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_basic.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_funcarg_basic.py", "file_name": "test_funcarg_basic.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 16, "span_ids": ["some", "imports", "test_func", "other"], "tokens": 40}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef some(request):\n return request.function.__name__\n\n\n@pytest.fixture\ndef other(request):\n return 42\n\n\ndef test_func(some, other):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_classlevel.py", "file_name": "test_funcarg_lookup_classlevel.py", "file_type": "text/x-python", 
"category": "test", "start_line": 1, "end_line": 11, "span_ids": ["TestClass", "imports", "TestClass.something", "TestClass.test_method"], "tokens": 38}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\nclass TestClass(object):\n @pytest.fixture\n def something(self, request):\n return request.instance\n\n def test_method(self, something):\n assert something is self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_modulelevel.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_modulelevel.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookup_modulelevel.py", "file_name": "test_funcarg_lookup_modulelevel.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 16, "span_ids": ["TestClass", "test_func", "TestClass.test_method", "something", "imports"], "tokens": 53}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef something(request):\n return request.function.__name__\n\n\nclass TestClass(object):\n def test_method(self, something):\n assert something == \"test_method\"\n\n\ndef test_func(something):\n assert something == \"test_func\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookupfails.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookupfails.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/fill_fixtures/test_funcarg_lookupfails.py", "file_name": "test_funcarg_lookupfails.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 11, "span_ids": ["imports", "test_func", "xyzsomething"], "tokens": 24}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef xyzsomething(request):\n return 42\n\n\ndef test_func(some):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/test_getfixturevalue_dynamic.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/fixtures/test_getfixturevalue_dynamic.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/fixtures/test_getfixturevalue_dynamic.py", "file_name": "test_getfixturevalue_dynamic.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 21, "span_ids": ["dynamic", "test", "a", "b", "imports"], "tokens": 61}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef dynamic():\n pass\n\n\n@pytest.fixture\ndef a(request):\n request.getfixturevalue(\"dynamic\")\n\n\n@pytest.fixture\ndef b(a):\n pass\n\n\ndef test(b, request):\n assert request.fixturenames == [\"b\", \"request\", \"a\", \"dynamic\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/conftest.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/conftest.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/issue88_initial_file_multinodes/conftest.py", "file_name": "conftest.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 15, "span_ids": ["MyItem", "MyFile", "MyFile.collect", "pytest_collect_file", "imports"], "tokens": 52}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\nclass MyFile(pytest.File):\n def collect(self):\n return [MyItem(\"hello\", parent=self)]\n\n\ndef pytest_collect_file(path, parent):\n return MyFile(path, parent)\n\n\nclass MyItem(pytest.Item):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/test_hello.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue88_initial_file_multinodes/test_hello.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/issue88_initial_file_multinodes/test_hello.py", "file_name": "test_hello.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_hello"], "tokens": 6}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hello():\n pass", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_pprint_checked_order.assert_order_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_pprint_checked_order.assert_order_", "embedding": null, "metadata": {"file_path": "testing/example_scripts/issue_519.py", "file_name": "issue_519.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 31, "span_ids": ["imports", "pytest_generate_tests", "checked_order"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pprint\n\nimport pytest\n\n\ndef pytest_generate_tests(metafunc):\n if \"arg1\" in metafunc.fixturenames:\n metafunc.parametrize(\"arg1\", [\"arg1v1\", \"arg1v2\"], scope=\"module\")\n\n if \"arg2\" in metafunc.fixturenames:\n metafunc.parametrize(\"arg2\", [\"arg2v1\", \"arg2v2\"], scope=\"function\")\n\n\n@pytest.fixture(scope=\"session\")\ndef checked_order():\n order = []\n\n yield order\n pprint.pprint(order)\n assert order == [\n (\"testing/example_scripts/issue_519.py\", \"fix1\", \"arg1v1\"),\n (\"test_one[arg1v1-arg2v1]\", \"fix2\", \"arg2v1\"),\n (\"test_two[arg1v1-arg2v1]\", \"fix2\", \"arg2v1\"),\n (\"test_one[arg1v1-arg2v2]\", \"fix2\", \"arg2v2\"),\n (\"test_two[arg1v1-arg2v2]\", \"fix2\", \"arg2v2\"),\n (\"testing/example_scripts/issue_519.py\", \"fix1\", \"arg1v2\"),\n (\"test_one[arg1v2-arg2v1]\", \"fix2\", \"arg2v1\"),\n (\"test_two[arg1v2-arg2v1]\", \"fix2\", \"arg2v1\"),\n (\"test_one[arg1v2-arg2v2]\", \"fix2\", \"arg2v2\"),\n (\"test_two[arg1v2-arg2v2]\", \"fix2\", \"arg2v2\"),\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_fix1_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/issue_519.py_fix1_", "embedding": null, "metadata": {"file_path": "testing/example_scripts/issue_519.py", "file_name": "issue_519.py", "file_type": "text/x-python", "category": "implementation", "start_line": 34, "end_line": 52, "span_ids": ["test_one", "test_two", "fix1", "fix2"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.yield_fixture(scope=\"module\")\ndef fix1(request, arg1, checked_order):\n checked_order.append((request.node.name, \"fix1\", arg1))\n yield \"fix1-\" + arg1\n\n\n@pytest.yield_fixture(scope=\"function\")\ndef fix2(request, fix1, arg2, checked_order):\n checked_order.append((request.node.name, \"fix2\", arg2))\n yield \"fix2-\" + arg2 + fix1\n\n\ndef test_one(fix2):\n pass\n\n\ndef test_two(fix2):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/marks/marks_considered_keywords/test_marks_as_keywords.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/marks/marks_considered_keywords/test_marks_as_keywords.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/marks/marks_considered_keywords/test_marks_as_keywords.py", "file_name": "test_marks_as_keywords.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["imports", "test_mark"], "tokens": 13}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.mark.foo\ndef test_mark():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/generate_folders.py_argparse_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/generate_folders.py_argparse_", "embedding": null, "metadata": {"file_path": "testing/example_scripts/perf_examples/collect_stats/generate_folders.py", "file_name": "generate_folders.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 28, "span_ids": ["impl:8", "imports", "generate_folders", "impl"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import argparse\nimport pathlib\n\nHERE = pathlib.Path(__file__).parent\nTEST_CONTENT = (HERE / \"template_test.py\").read_bytes()\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"numbers\", nargs=\"*\", type=int)\n\n\ndef generate_folders(root, elements, *more_numbers):\n fill_len = len(str(elements))\n if more_numbers:\n for i in range(elements):\n new_folder = root.joinpath(f\"foo_{i:0>{fill_len}}\")\n new_folder.mkdir()\n new_folder.joinpath(\"__init__.py\").write_bytes(TEST_CONTENT)\n generate_folders(new_folder, *more_numbers)\n else:\n for i in range(elements):\n new_test = root.joinpath(f\"test_{i:0<{fill_len}}.py\")\n new_test.write_bytes(TEST_CONTENT)\n\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n generate_folders(HERE, *(args.numbers or (10, 100)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/template_test.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/perf_examples/collect_stats/template_test.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/perf_examples/collect_stats/template_test.py", 
"file_name": "template_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 3, "span_ids": ["test_x"], "tokens": 6}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_x():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/tmpdir/tmpdir_fixture.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/tmpdir/tmpdir_fixture.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/tmpdir/tmpdir_fixture.py", "file_name": "tmpdir_fixture.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 8, "span_ids": ["imports", "test_fixture"], "tokens": 41}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.mark.parametrize(\"a\", [r\"qwe/\\abc\"])\ndef test_fixture(tmpdir, a):\n tmpdir.check(dir=1)\n assert tmpdir.listdir() == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_parametrized_fixture_error_message.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_parametrized_fixture_error_message.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/unittest/test_parametrized_fixture_error_message.py", "file_name": "test_parametrized_fixture_error_message.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 15, "span_ids": ["imports", "TestSomethingElse.test_two", "TestSomethingElse", "two"], "tokens": 46}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import unittest\n\nimport pytest\n\n\n@pytest.fixture(params=[1, 2])\ndef two(request):\n return request.param\n\n\n@pytest.mark.usefixtures(\"two\")\nclass TestSomethingElse(unittest.TestCase):\n def test_two(self):\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/unittest/test_setup_skip.py", "file_name": 
"test_setup_skip.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 14, "span_ids": ["docstring", "Test.test_foo", "Base.setUp", "Base", "Test", "imports"], "tokens": 61}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Skipping an entire subclass with unittest.skip() should *not* call setUp from a base class.\"\"\"\nimport unittest\n\n\nclass Base(unittest.TestCase):\n def setUp(self):\n assert 0\n\n\n@unittest.skip(\"skip all tests\")\nclass Test(Base):\n def test_foo(self):\n assert 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_class.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_class.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/unittest/test_setup_skip_class.py", "file_name": "test_setup_skip_class.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 15, "span_ids": ["Base.setUpClass", "docstring", "Test.test_foo", "Base", "Test", "imports"], "tokens": 67}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"Skipping an entire subclass with unittest.skip() should *not* call setUpClass from a base class.\"\"\"\nimport unittest\n\n\nclass Base(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n assert 0\n\n\n@unittest.skip(\"skip all tests\")\nclass Test(Base):\n def test_foo(self):\n assert 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_module.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/unittest/test_setup_skip_module.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/unittest/test_setup_skip_module.py", "file_name": "test_setup_skip_module.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 13, "span_ids": ["setUpModule", "docstring", "Base.test", "Base", "imports"], "tokens": 51}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"setUpModule is always called, even if all tests in the module are skipped\"\"\"\nimport unittest\n\n\ndef setUpModule():\n assert 0\n\n\n@unittest.skip(\"skip all tests\")\nclass Base(unittest.TestCase):\n def test(self):\n assert 0", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/warnings/test_group_warnings_by_message.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/example_scripts/warnings/test_group_warnings_by_message.py__", "embedding": null, "metadata": {"file_path": "testing/example_scripts/warnings/test_group_warnings_by_message.py", "file_name": "test_group_warnings_by_message.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 17, "span_ids": ["test_foo", "imports", "func", "test_bar"], "tokens": 44}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import warnings\n\nimport pytest\n\n\ndef func():\n warnings.warn(UserWarning(\"foo\"))\n\n\n@pytest.mark.parametrize(\"i\", range(5))\ndef test_foo(i):\n func()\n\n\ndef test_bar():\n func()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/examples/test_issue519.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/examples/test_issue519.py__", "embedding": null, "metadata": {"file_path": "testing/examples/test_issue519.py", "file_name": "test_issue519.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 4, "span_ids": ["test_510"], "tokens": 29}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_510(testdir):\n testdir.copy_example(\"issue_519.py\")\n testdir.runpytest(\"issue_519.py\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/create_executable.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/create_executable.py__", "embedding": null, "metadata": {"file_path": "testing/freeze/create_executable.py", "file_name": "create_executable.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 14, "span_ids": ["impl", "imports", "docstring", "impl:2"], "tokens": 97}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nGenerates an executable with pytest runner embedded using PyInstaller.\n\"\"\"\nif __name__ == \"__main__\":\n import pytest\n import subprocess\n\n hidden = []\n for x in pytest.freeze_includes():\n hidden.extend([\"--hidden-import\", x])\n hidden.extend([\"--hidden-import\", \"distutils\"])\n args = 
[\"pyinstaller\", \"--noconfirm\"] + hidden + [\"runtests_script.py\"]\n subprocess.check_call(\" \".join(args), shell=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/runtests_script.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/runtests_script.py__", "embedding": null, "metadata": {"file_path": "testing/freeze/runtests_script.py", "file_name": "runtests_script.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 11, "span_ids": ["impl", "imports", "docstring", "impl:2"], "tokens": 44}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nThis is the script that is actually frozen into an executable: simply executes\npy.test main().\n\"\"\"\n\nif __name__ == \"__main__\":\n import sys\n import pytest\n\n sys.exit(pytest.main())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tests/test_trivial.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tests/test_trivial.py__", "embedding": null, "metadata": {"file_path": "testing/freeze/tests/test_trivial.py", "file_name": "test_trivial.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 7, "span_ids": ["test_lower", "test_upper"], "tokens": 32}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_upper():\n assert \"foo\".upper() == \"FOO\"\n\n\ndef test_lower():\n assert \"FOO\".lower() == \"foo\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tox_run.py__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/freeze/tox_run.py__", "embedding": null, "metadata": {"file_path": "testing/freeze/tox_run.py", "file_name": "tox_run.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 13, "span_ids": ["impl", "imports", "docstring", "impl:2"], "tokens": 85}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nCalled by tox.ini: uses the generated executable to run the tests in ./tests/\ndirectory.\n\"\"\"\nif __name__ == \"__main__\":\n import os\n import sys\n\n executable = os.path.join(os.getcwd(), \"dist\", \"runtests_script\", 
\"runtests_script\")\n if sys.platform.startswith(\"win\"):\n executable += \".exe\"\n sys.exit(os.system(\"%s tests\" % executable))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py__coding_utf_8__test_maxsize_error_on_instance.assert_s_0_and_s_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py__coding_utf_8__test_maxsize_error_on_instance.assert_s_0_and_s_", "embedding": null, "metadata": {"file_path": "testing/io/test_saferepr.py", "file_name": "test_saferepr.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 24, "span_ids": ["test_simple_repr", "test_maxsize_error_on_instance.A", "docstring", "imports", "test_maxsize_error_on_instance.A.__repr__", "test_maxsize", "test_maxsize_error_on_instance"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom _pytest._io.saferepr import saferepr\n\n\ndef test_simple_repr():\n assert saferepr(1) == \"1\"\n assert saferepr(None) == \"None\"\n\n\ndef test_maxsize():\n s = saferepr(\"x\" * 50, maxsize=25)\n assert len(s) == 25\n expected = repr(\"x\" * 10 + \"...\" + \"x\" * 10)\n assert s == expected\n\n\ndef test_maxsize_error_on_instance():\n class A:\n def __repr__():\n raise ValueError(\"...\")\n\n s = saferepr((\"*\" * 50, A()), maxsize=25)\n assert len(s) == 25\n assert s[0] == \"(\" and s[-1] == \")\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_exceptions_test_exceptions.assert_unknown_in_s2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_exceptions_test_exceptions.assert_unknown_in_s2", "embedding": null, "metadata": {"file_path": "testing/io/test_saferepr.py", "file_name": "test_saferepr.py", "file_type": "text/x-python", "category": "test", "start_line": 27, "end_line": 46, "span_ids": ["test_exceptions", "test_exceptions.BrokenReprException:2", "test_exceptions.BrokenRepr", "test_exceptions.BrokenRepr.__init__", "test_exceptions.BrokenReprException"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exceptions():\n class BrokenRepr:\n def __init__(self, ex):\n self.ex = ex\n\n def __repr__(self):\n raise self.ex\n\n class BrokenReprException(Exception):\n __str__ = None\n __repr__ = None\n\n assert \"Exception\" in saferepr(BrokenRepr(Exception(\"broken\")))\n s = saferepr(BrokenReprException(\"really broken\"))\n assert \"TypeError\" in s\n assert \"TypeError\" in saferepr(BrokenRepr(\"string\"))\n\n s2 = saferepr(BrokenRepr(BrokenReprException(\"omg even worse\")))\n assert \"NameError\" 
not in s2\n assert \"unknown\" in s2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_big_repr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/io/test_saferepr.py_test_big_repr_", "embedding": null, "metadata": {"file_path": "testing/io/test_saferepr.py", "file_name": "test_saferepr.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 67, "span_ids": ["test_unicode", "test_repr_on_newstyle.Function", "test_repr_on_newstyle.Function.__repr__", "test_big_repr", "test_repr_on_newstyle"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_big_repr():\n from _pytest._io.saferepr import SafeRepr\n\n assert len(saferepr(range(1000))) <= len(\"[\" + SafeRepr().maxlist * \"1000\" + \"]\")\n\n\ndef test_repr_on_newstyle():\n class Function(object):\n def __repr__(self):\n return \"<%s>\" % (self.name)\n\n assert saferepr(Function())\n\n\ndef test_unicode():\n val = u\"\u00a3\u20ac\"\n reprval = u\"'\u00a3\u20ac'\"\n assert saferepr(val) == reprval", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py__coding_utf_8__test_change_level.assert_CRITICAL_in_capl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py__coding_utf_8__test_change_level.assert_CRITICAL_in_capl", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["impl", "docstring", "test_fixture_help", "test_change_level", "imports"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nimport logging\n\nimport pytest\n\nlogger = logging.getLogger(__name__)\nsublogger = logging.getLogger(__name__ + \".baz\")\n\n\ndef test_fixture_help(testdir):\n result = testdir.runpytest(\"--fixtures\")\n result.stdout.fnmatch_lines([\"*caplog*\"])\n\n\ndef test_change_level(caplog):\n caplog.set_level(logging.INFO)\n logger.debug(\"handler DEBUG level\")\n logger.info(\"handler INFO level\")\n\n caplog.set_level(logging.CRITICAL, logger=sublogger.name)\n sublogger.warning(\"logger WARNING level\")\n sublogger.critical(\"logger CRITICAL level\")\n\n assert \"DEBUG\" not in caplog.text\n assert \"INFO\" in caplog.text\n assert \"WARNING\" not in caplog.text\n assert \"CRITICAL\" in caplog.text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_change_level_undo_test_change_level_undo.assert_log_from_test2_n": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_change_level_undo_test_change_level_undo.assert_log_from_test2_n", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 50, "span_ids": ["test_change_level_undo"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_change_level_undo(testdir):\n \"\"\"Ensure that 'set_level' is undone after the end of the test\"\"\"\n testdir.makepyfile(\n \"\"\"\n import logging\n\n def test1(caplog):\n caplog.set_level(logging.INFO)\n # using + operator here so fnmatch_lines doesn't match the code in the traceback\n logging.info('log from ' + 'test1')\n assert 0\n\n def test2(caplog):\n # using + operator here so fnmatch_lines doesn't match the code in the traceback\n logging.info('log from ' + 'test2')\n assert 0\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*log from test1*\", \"*2 failed in *\"])\n assert \"log from test2\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_with_statement_test_log_access.assert_boo_arg_in_caplo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_with_statement_test_log_access.assert_boo_arg_in_caplo", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 53, "end_line": 73, "span_ids": ["test_with_statement", "test_log_access"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_with_statement(caplog):\n with caplog.at_level(logging.INFO):\n logger.debug(\"handler DEBUG level\")\n logger.info(\"handler INFO level\")\n\n with caplog.at_level(logging.CRITICAL, logger=sublogger.name):\n sublogger.warning(\"logger WARNING level\")\n sublogger.critical(\"logger CRITICAL level\")\n\n assert \"DEBUG\" not in caplog.text\n assert \"INFO\" in caplog.text\n assert \"WARNING\" not in caplog.text\n assert \"CRITICAL\" in caplog.text\n\n\ndef test_log_access(caplog):\n caplog.set_level(logging.INFO)\n logger.info(\"boo %s\", \"arg\")\n assert caplog.records[0].levelname == \"INFO\"\n assert caplog.records[0].msg == \"boo %s\"\n assert \"boo arg\" in caplog.text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_messages_test_messages.assert_Exception_not_in": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_messages_test_messages.assert_Exception_not_in", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 94, "span_ids": ["test_messages"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_messages(caplog):\n caplog.set_level(logging.INFO)\n logger.info(\"boo %s\", \"arg\")\n logger.info(\"bar %s\\nbaz %s\", \"arg1\", \"arg2\")\n assert \"boo arg\" == caplog.messages[0]\n assert \"bar arg1\\nbaz arg2\" == caplog.messages[1]\n assert caplog.text.count(\"\\n\") > len(caplog.messages)\n assert len(caplog.text.splitlines()) > len(caplog.messages)\n\n try:\n raise Exception(\"test\")\n except Exception:\n logger.exception(\"oops\")\n\n assert \"oops\" in caplog.text\n assert \"oops\" in caplog.messages[-1]\n # Tracebacks are stored in the record and not added until the formatter or handler.\n assert \"Exception\" in caplog.text\n assert \"Exception\" not in caplog.messages[-1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_record_tuples_logging_during_setup_and_teardown.assert_x_message_for_x_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_record_tuples_logging_during_setup_and_teardown.assert_x_message_for_x_i", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 97, "end_line": 128, "span_ids": ["logging_during_setup_and_teardown", "test_clear", "test_unicode", "test_record_tuples"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_record_tuples(caplog):\n caplog.set_level(logging.INFO)\n logger.info(\"boo %s\", \"arg\")\n\n assert caplog.record_tuples == [(__name__, logging.INFO, \"boo arg\")]\n\n\ndef test_unicode(caplog):\n caplog.set_level(logging.INFO)\n logger.info(u\"b\u016b\")\n assert caplog.records[0].levelname == \"INFO\"\n assert caplog.records[0].msg == u\"b\u016b\"\n assert u\"b\u016b\" in caplog.text\n\n\ndef test_clear(caplog):\n caplog.set_level(logging.INFO)\n logger.info(u\"b\u016b\")\n assert len(caplog.records)\n assert caplog.text\n caplog.clear()\n assert not len(caplog.records)\n assert not caplog.text\n\n\n@pytest.fixture\ndef logging_during_setup_and_teardown(caplog):\n caplog.set_level(\"INFO\")\n logger.info(\"a_setup_log\")\n yield\n logger.info(\"a_teardown_log\")\n assert [x.message for x in 
caplog.get_records(\"teardown\")] == [\"a_teardown_log\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_caplog_captures_for_all_stages_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_fixture.py_test_caplog_captures_for_all_stages_", "embedding": null, "metadata": {"file_path": "testing/logging/test_fixture.py", "file_name": "test_fixture.py", "file_type": "text/x-python", "category": "test", "start_line": 131, "end_line": 141, "span_ids": ["test_caplog_captures_for_all_stages"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):\n assert not caplog.records\n assert not caplog.get_records(\"call\")\n logger.info(\"a_call_log\")\n assert [x.message for x in caplog.get_records(\"call\")] == [\"a_call_log\"]\n\n assert [x.message for x in caplog.get_records(\"setup\")] == [\"a_setup_log\"]\n\n # This reaches into private API, don't use this type of thing in real tests!\n assert set(caplog._item.catch_log_handlers.keys()) == {\"setup\", \"call\"}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_formatter.py_logging_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_formatter.py_logging_", "embedding": null, "metadata": {"file_path": "testing/logging/test_formatter.py", "file_name": "test_formatter.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["imports", "test_coloredlogformatter.ColorConfig", "test_coloredlogformatter.ColorConfig.option:2", "test_coloredlogformatter"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import logging\n\nimport py.io\n\nfrom _pytest.logging import ColoredLevelFormatter\n\n\ndef test_coloredlogformatter():\n logfmt = \"%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s\"\n\n record = logging.LogRecord(\n name=\"dummy\",\n level=logging.INFO,\n pathname=\"dummypath\",\n lineno=10,\n msg=\"Test Message\",\n args=(),\n exc_info=False,\n )\n\n class ColorConfig(object):\n class option(object):\n pass\n\n tw = py.io.TerminalWriter()\n tw.hasmarkup = True\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n assert output == (\n \"dummypath 10 \\x1b[32mINFO \\x1b[0m Test Message\"\n )\n\n tw.hasmarkup = False\n formatter = ColoredLevelFormatter(tw, logfmt)\n output = formatter.format(record)\n assert output == (\"dummypath 10 INFO Test Message\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py__coding_utf_8__test_nothing_logged.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py__coding_utf_8__test_nothing_logged.with_pytest_raises_pytest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports", "test_nothing_logged", "docstring"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nimport os\nimport re\nfrom io import open\n\nimport six\n\nimport pytest\n\n\ndef test_nothing_logged(testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n\n def test_foo():\n sys.stdout.write('text going to stdout')\n sys.stderr.write('text going to stderr')\n assert False\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*- Captured stdout call -*\", \"text going to stdout\"])\n result.stdout.fnmatch_lines([\"*- Captured stderr call -*\", \"text going to stderr\"])\n with pytest.raises(pytest.fail.Exception):\n result.stdout.fnmatch_lines([\"*- Captured *log call -*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_messages_logged_test_messages_logged.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_messages_logged_test_messages_logged.None_3", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 49, "span_ids": ["test_messages_logged"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_messages_logged(testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo():\n sys.stdout.write('text going to stdout')\n sys.stderr.write('text going to stderr')\n logger.info('text going to logger')\n assert False\n \"\"\"\n )\n result = testdir.runpytest(\"--log-level=INFO\")\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*- Captured *log call -*\", \"*text going to logger*\"])\n result.stdout.fnmatch_lines([\"*- Captured stdout call -*\", \"text going to stdout\"])\n result.stdout.fnmatch_lines([\"*- Captured stderr call -*\", \"text going to stderr\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_root_logger_affected_test_root_logger_affected.with_open_log_file_as_rf.assert_error_text_going_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_root_logger_affected_test_root_logger_affected.with_open_log_file_as_rf.assert_error_text_going_", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 85, "span_ids": ["test_root_logger_affected"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_root_logger_affected(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n logger = logging.getLogger()\n\n def test_foo():\n logger.info('info text ' + 'going to logger')\n logger.warning('warning text ' + 'going to logger')\n logger.error('error text ' + 'going to logger')\n\n assert 0\n \"\"\"\n )\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n result = testdir.runpytest(\"--log-level=ERROR\", \"--log-file=pytest.log\")\n assert result.ret == 1\n\n # The capture log calls in the stdout section only contain the\n # logger.error msg, because of --log-level=ERROR.\n result.stdout.fnmatch_lines([\"*error text going to logger*\"])\n stdout = result.stdout.str()\n assert \"warning text going to logger\" not in stdout\n assert \"info text going to logger\" not in stdout\n\n # The log file should contain the warning and the error log messages and\n # not the info one, because the default level of the root logger is\n # WARNING.\n assert os.path.isfile(log_file)\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"info text going to logger\" not in contents\n assert \"warning text going to logger\" in contents\n assert \"error text going to logger\" in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_log_level_interaction_test_log_cli_level_log_level_interaction.assert_DEBUG_not_in_res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_log_level_interaction_test_log_cli_level_log_level_interaction.assert_DEBUG_not_in_res", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 115, "span_ids": ["test_log_cli_level_log_level_interaction"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_cli_level_log_level_interaction(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n logger = logging.getLogger()\n\n def test_foo():\n 
logger.debug('debug text ' + 'going to logger')\n logger.info('info text ' + 'going to logger')\n logger.warning('warning text ' + 'going to logger')\n logger.error('error text ' + 'going to logger')\n assert 0\n \"\"\"\n )\n\n result = testdir.runpytest(\"--log-cli-level=INFO\", \"--log-level=ERROR\")\n assert result.ret == 1\n\n result.stdout.fnmatch_lines(\n [\n \"*-- live log call --*\",\n \"*INFO*info text going to logger\",\n \"*WARNING*warning text going to logger\",\n \"*ERROR*error text going to logger\",\n \"=* 1 failed in *=\",\n ]\n )\n assert \"DEBUG\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_setup_logging_test_setup_logging.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_setup_logging_test_setup_logging.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 118, "end_line": 142, "span_ids": ["test_setup_logging"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_logging(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n\n logger = logging.getLogger(__name__)\n\n def setup_function(function):\n logger.info('text going to logger from setup')\n\n def test_foo():\n logger.info('text going to logger from call')\n assert False\n \"\"\"\n )\n result = testdir.runpytest(\"--log-level=INFO\")\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\n \"*- Captured *log setup -*\",\n \"*text going to logger from setup*\",\n \"*- Captured *log call -*\",\n \"*text going to logger from call*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_teardown_logging_test_teardown_logging.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_teardown_logging_test_teardown_logging.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 145, "end_line": 169, "span_ids": ["test_teardown_logging"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_teardown_logging(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo():\n logger.info('text going to logger from call')\n\n def teardown_function(function):\n 
logger.info('text going to logger from teardown')\n assert False\n \"\"\"\n )\n result = testdir.runpytest(\"--log-level=INFO\")\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\n \"*- Captured *log call -*\",\n \"*text going to logger from call*\",\n \"*- Captured *log teardown -*\",\n \"*text going to logger from teardown*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_test_disable_log_capturing.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_test_disable_log_capturing.with_pytest_raises_pytest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 193, "span_ids": ["test_disable_log_capturing"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disable_log_capturing(testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo():\n sys.stdout.write('text going to stdout')\n logger.warning('catch me if you can!')\n sys.stderr.write('text going to stderr')\n assert False\n \"\"\"\n )\n result = testdir.runpytest(\"--no-print-logs\")\n print(result.stdout)\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*- Captured stdout call -*\", \"text going to stdout\"])\n result.stdout.fnmatch_lines([\"*- Captured stderr call -*\", \"text going to stderr\"])\n with pytest.raises(pytest.fail.Exception):\n result.stdout.fnmatch_lines([\"*- Captured *log call -*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_ini_test_disable_log_capturing_ini.with_pytest_raises_pytest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_disable_log_capturing_ini_test_disable_log_capturing_ini.with_pytest_raises_pytest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 223, "span_ids": ["test_disable_log_capturing_ini"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_disable_log_capturing_ini(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n log_print=False\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import 
sys\n import logging\n\n logger = logging.getLogger(__name__)\n\n def test_foo():\n sys.stdout.write('text going to stdout')\n logger.warning('catch me if you can!')\n sys.stderr.write('text going to stderr')\n assert False\n \"\"\"\n )\n result = testdir.runpytest()\n print(result.stdout)\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*- Captured stdout call -*\", \"text going to stdout\"])\n result.stdout.fnmatch_lines([\"*- Captured stderr call -*\", \"text going to stderr\"])\n with pytest.raises(pytest.fail.Exception):\n result.stdout.fnmatch_lines([\"*- Captured *log call -*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_enabled_disabled_test_log_cli_enabled_disabled.None_1.else_.assert_msg_not_in_result_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_enabled_disabled_test_log_cli_enabled_disabled.None_1.else_.assert_msg_not_in_result_", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 256, "span_ids": ["test_log_cli_enabled_disabled"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"enabled\", [True, False])\ndef test_log_cli_enabled_disabled(testdir, enabled):\n msg = \"critical message logged by test\"\n testdir.makepyfile(\n \"\"\"\n import logging\n def test_log_cli():\n logging.critical(\"{}\")\n \"\"\".format(\n msg\n )\n )\n if enabled:\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n result = testdir.runpytest()\n if enabled:\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_enabled_disabled.py::test_log_cli \",\n \"*-- live log call --*\",\n \"test_log_cli_enabled_disabled.py* CRITICAL critical message logged by test\",\n \"PASSED*\",\n ]\n )\n else:\n assert msg not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_test_log_cli_default_level.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_test_log_cli_default_level.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 259, "end_line": 290, "span_ids": ["test_log_cli_default_level"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_log_cli_default_level(testdir):\n # Default log file level\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_cli(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_cli_handler.level == logging.NOTSET\n logging.getLogger('catchlog').info(\"INFO message won't be shown\")\n logging.getLogger('catchlog').warning(\"WARNING message will be shown\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest()\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_default_level.py::test_log_cli \",\n \"test_log_cli_default_level.py*WARNING message will be shown*\",\n ]\n )\n assert \"INFO message won't be shown\" not in result.stdout.str()\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_multiple_tests_test_log_cli_default_level_multiple_tests.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_multiple_tests_test_log_cli_default_level_multiple_tests.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 325, "span_ids": ["test_log_cli_default_level_multiple_tests"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_cli_default_level_multiple_tests(testdir, request):\n \"\"\"Ensure we reset the first newline added by the live logger between tests\"\"\"\n filename = request.node.name + \".py\"\n testdir.makepyfile(\n \"\"\"\n import logging\n\n def test_log_1():\n logging.warning(\"log message from test_log_1\")\n\n def test_log_2():\n logging.warning(\"log message from test_log_2\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"{}::test_log_1 \".format(filename),\n \"*WARNING*log message from test_log_1*\",\n \"PASSED *50%*\",\n \"{}::test_log_2 \".format(filename),\n \"*WARNING*log message from test_log_2*\",\n \"PASSED *100%*\",\n \"=* 2 passed in *=\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_sections_test_log_cli_default_level_sections.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_default_level_sections_test_log_cli_default_level_sections.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", 
"file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 328, "end_line": 399, "span_ids": ["test_log_cli_default_level_sections"], "tokens": 524}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_cli_default_level_sections(testdir, request):\n \"\"\"Check that with live logging enable we are printing the correct headers during\n start/setup/call/teardown/finish.\"\"\"\n filename = request.node.name + \".py\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n import logging\n\n def pytest_runtest_logstart():\n logging.warning('>>>>> START >>>>>')\n\n def pytest_runtest_logfinish():\n logging.warning('<<<<< END <<<<<<<')\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n\n @pytest.fixture\n def fix(request):\n logging.warning(\"log message from setup of {}\".format(request.node.name))\n yield\n logging.warning(\"log message from teardown of {}\".format(request.node.name))\n\n def test_log_1(fix):\n logging.warning(\"log message from test_log_1\")\n\n def test_log_2(fix):\n logging.warning(\"log message from test_log_2\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"{}::test_log_1 \".format(filename),\n \"*-- live log start --*\",\n \"*WARNING* >>>>> START >>>>>*\",\n \"*-- live log setup --*\",\n \"*WARNING*log message from setup of test_log_1*\",\n \"*-- live log call --*\",\n \"*WARNING*log message from test_log_1*\",\n \"PASSED *50%*\",\n \"*-- live log teardown --*\",\n \"*WARNING*log message from teardown of test_log_1*\",\n \"*-- live log finish --*\",\n \"*WARNING* <<<<< END <<<<<<<*\",\n \"{}::test_log_2 \".format(filename),\n \"*-- live log start --*\",\n \"*WARNING* >>>>> START >>>>>*\",\n \"*-- live log setup --*\",\n \"*WARNING*log message from setup of test_log_2*\",\n \"*-- live log call --*\",\n \"*WARNING*log message from test_log_2*\",\n \"PASSED *100%*\",\n \"*-- live log teardown --*\",\n \"*WARNING*log message from teardown of test_log_2*\",\n \"*-- live log finish --*\",\n \"*WARNING* <<<<< END <<<<<<<*\",\n \"=* 2 passed in *=\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logs_unknown_sections_test_live_logs_unknown_sections.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logs_unknown_sections_test_live_logs_unknown_sections.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 461, "span_ids": ["test_live_logs_unknown_sections"], "tokens": 381}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "def test_live_logs_unknown_sections(testdir, request):\n \"\"\"Check that with live logging enable we are printing the correct headers during\n start/setup/call/teardown/finish.\"\"\"\n filename = request.node.name + \".py\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n import logging\n\n def pytest_runtest_protocol(item, nextitem):\n logging.warning('Unknown Section!')\n\n def pytest_runtest_logstart():\n logging.warning('>>>>> START >>>>>')\n\n def pytest_runtest_logfinish():\n logging.warning('<<<<< END <<<<<<<')\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n\n @pytest.fixture\n def fix(request):\n logging.warning(\"log message from setup of {}\".format(request.node.name))\n yield\n logging.warning(\"log message from teardown of {}\".format(request.node.name))\n\n def test_log_1(fix):\n logging.warning(\"log message from test_log_1\")\n\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*WARNING*Unknown Section*\",\n \"{}::test_log_1 \".format(filename),\n \"*WARNING* >>>>> START >>>>>*\",\n \"*-- live log setup --*\",\n \"*WARNING*log message from setup of test_log_1*\",\n \"*-- live log call --*\",\n \"*WARNING*log message from test_log_1*\",\n \"PASSED *100%*\",\n \"*-- live log teardown --*\",\n \"*WARNING*log message from teardown of test_log_1*\",\n \"*WARNING* <<<<< END <<<<<<<*\",\n \"=* 1 passed in *=\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_sections_single_new_line_after_test_outcome_test_sections_single_new_line_after_test_outcome.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_sections_single_new_line_after_test_outcome_test_sections_single_new_line_after_test_outcome.None_1", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 464, "end_line": 539, "span_ids": ["test_sections_single_new_line_after_test_outcome"], "tokens": 507}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sections_single_new_line_after_test_outcome(testdir, request):\n \"\"\"Check that only a single new line is written between log messages during\n teardown/finish.\"\"\"\n filename = request.node.name + \".py\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n import logging\n\n def pytest_runtest_logstart():\n logging.warning('>>>>> START >>>>>')\n\n def pytest_runtest_logfinish():\n logging.warning('<<<<< END <<<<<<<')\n logging.warning('<<<<< END <<<<<<<')\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n\n @pytest.fixture\n def fix(request):\n logging.warning(\"log message from setup of {}\".format(request.node.name))\n yield\n logging.warning(\"log message from teardown of {}\".format(request.node.name))\n logging.warning(\"log message from teardown of 
{}\".format(request.node.name))\n\n def test_log_1(fix):\n logging.warning(\"log message from test_log_1\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"{}::test_log_1 \".format(filename),\n \"*-- live log start --*\",\n \"*WARNING* >>>>> START >>>>>*\",\n \"*-- live log setup --*\",\n \"*WARNING*log message from setup of test_log_1*\",\n \"*-- live log call --*\",\n \"*WARNING*log message from test_log_1*\",\n \"PASSED *100%*\",\n \"*-- live log teardown --*\",\n \"*WARNING*log message from teardown of test_log_1*\",\n \"*-- live log finish --*\",\n \"*WARNING* <<<<< END <<<<<<<*\",\n \"*WARNING* <<<<< END <<<<<<<*\",\n \"=* 1 passed in *=\",\n ]\n )\n assert (\n re.search(\n r\"(.+)live log teardown(.+)\\n(.+)WARNING(.+)\\n(.+)WARNING(.+)\",\n result.stdout.str(),\n re.MULTILINE,\n )\n is not None\n )\n assert (\n re.search(\n r\"(.+)live log finish(.+)\\n(.+)WARNING(.+)\\n(.+)WARNING(.+)\",\n result.stdout.str(),\n re.MULTILINE,\n )\n is not None\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_test_log_cli_level.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_level_test_log_cli_level.None_3", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 542, "end_line": 589, "span_ids": ["test_log_cli_level"], "tokens": 376}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_cli_level(testdir):\n # Default log file level\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_cli(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_cli_handler.level == logging.INFO\n logging.getLogger('catchlog').debug(\"This log message won't be shown\")\n logging.getLogger('catchlog').info(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n \"\"\"\n )\n\n result = testdir.runpytest(\"-s\", \"--log-cli-level=INFO\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_level.py*This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n assert \"This log message won't be shown\" not in result.stdout.str()\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n\n result = testdir.runpytest(\"-s\", \"--log-level=INFO\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_level.py* This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n assert \"This log message won't be shown\" not in result.stdout.str()\n\n # make sure that that we get a '0' exit code for the 
testsuite\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_ini_level_test_log_cli_ini_level.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_ini_level_test_log_cli_ini_level.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 592, "end_line": 625, "span_ids": ["test_log_cli_ini_level"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_cli_ini_level(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n log_cli=true\n log_cli_level = INFO\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_cli(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_cli_handler.level == logging.INFO\n logging.getLogger('catchlog').debug(\"This log message won't be shown\")\n logging.getLogger('catchlog').info(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n\n result = testdir.runpytest(\"-s\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines(\n [\n \"test_log_cli_ini_level.py* This log message will be shown\",\n \"PASSED\", # 'PASSED' on its own line because the log message prints a new line\n ]\n )\n assert \"This log message won't be shown\" not in result.stdout.str()\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_auto_enable_test_log_cli_auto_enable.if_cli_args_log_cli.else_.assert_WARNING_not_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_cli_auto_enable_test_log_cli_auto_enable.if_cli_args_log_cli.else_.assert_WARNING_not_in_s", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 628, "end_line": 672, "span_ids": ["test_log_cli_auto_enable"], "tokens": 322}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"cli_args\",\n [\"\", \"--log-level=WARNING\", \"--log-file-level=WARNING\", \"--log-cli-level=WARNING\"],\n)\ndef test_log_cli_auto_enable(testdir, request, cli_args):\n \"\"\"Check that live logs are enabled if --log-level or --log-cli-level is 
passed on the CLI.\n It should not be auto enabled if the same configs are set on the INI file.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import logging\n\n def test_log_1():\n logging.info(\"log message from test_log_1 not to be shown\")\n logging.warning(\"log message from test_log_1\")\n\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n log_level=INFO\n log_cli_level=INFO\n \"\"\"\n )\n\n result = testdir.runpytest(cli_args)\n stdout = result.stdout.str()\n if cli_args == \"--log-cli-level=WARNING\":\n result.stdout.fnmatch_lines(\n [\n \"*::test_log_1 \",\n \"*-- live log call --*\",\n \"*WARNING*log message from test_log_1*\",\n \"PASSED *100%*\",\n \"=* 1 passed in *=\",\n ]\n )\n assert \"INFO\" not in stdout\n else:\n result.stdout.fnmatch_lines(\n [\"*test_log_cli_auto_enable*100%*\", \"=* 1 passed in *=\"]\n )\n assert \"INFO\" not in stdout\n assert \"WARNING\" not in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_test_log_file_cli.with_open_log_file_as_rf.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_test_log_file_cli.with_open_log_file_as_rf.None_1", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 705, "span_ids": ["test_log_file_cli"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_file_cli(testdir):\n # Default log file level\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_file(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_file_handler.level == logging.WARNING\n logging.getLogger('catchlog').info(\"This log message won't be shown\")\n logging.getLogger('catchlog').warning(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n result = testdir.runpytest(\n \"-s\", \"--log-file={}\".format(log_file), \"--log-file-level=WARNING\"\n )\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"test_log_file_cli.py PASSED\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"This log message will be shown\" in contents\n assert \"This log message won't be shown\" not in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_level_test_log_level_not_changed_by_default.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_cli_level_test_log_level_not_changed_by_default.result_stdout_fnmatch_lin", 
"embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 708, "end_line": 750, "span_ids": ["test_log_file_cli_level", "test_log_level_not_changed_by_default"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_file_cli_level(testdir):\n # Default log file level\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_file(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_file_handler.level == logging.INFO\n logging.getLogger('catchlog').debug(\"This log message won't be shown\")\n logging.getLogger('catchlog').info(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n result = testdir.runpytest(\n \"-s\", \"--log-file={}\".format(log_file), \"--log-file-level=INFO\"\n )\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"test_log_file_cli_level.py PASSED\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"This log message will be shown\" in contents\n assert \"This log message won't be shown\" not in contents\n\n\ndef test_log_level_not_changed_by_default(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n def test_log_file():\n assert logging.getLogger().level == logging.WARNING\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_test_log_file_ini.with_open_log_file_as_rf.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_test_log_file_ini.with_open_log_file_as_rf.None_1", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 753, "end_line": 789, "span_ids": ["test_log_file_ini"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_file_ini(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level=WARNING\n \"\"\".format(\n log_file\n )\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_file(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_file_handler.level == logging.WARNING\n logging.getLogger('catchlog').info(\"This log message won't be shown\")\n 
logging.getLogger('catchlog').warning(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n\n result = testdir.runpytest(\"-s\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"test_log_file_ini.py PASSED\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"This log message will be shown\" in contents\n assert \"This log message won't be shown\" not in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_level_test_log_file_ini_level.with_open_log_file_as_rf.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_ini_level_test_log_file_ini_level.with_open_log_file_as_rf.None_1", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 792, "end_line": 828, "span_ids": ["test_log_file_ini_level"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_file_ini_level(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level = INFO\n \"\"\".format(\n log_file\n )\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n import logging\n def test_log_file(request):\n plugin = request.config.pluginmanager.getplugin('logging-plugin')\n assert plugin.log_file_handler.level == logging.INFO\n logging.getLogger('catchlog').debug(\"This log message won't be shown\")\n logging.getLogger('catchlog').info(\"This log message will be shown\")\n print('PASSED')\n \"\"\"\n )\n\n result = testdir.runpytest(\"-s\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"test_log_file_ini_level.py PASSED\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"This log message will be shown\" in contents\n assert \"This log message won't be shown\" not in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_unicode_test_log_file_unicode.with_open_log_file_encod.assert_Another_normal_me": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_file_unicode_test_log_file_unicode.with_open_log_file_encod.assert_Another_normal_me", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 831, "end_line": 865, "span_ids": ["test_log_file_unicode"], "tokens": 231}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_file_unicode(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level = INFO\n \"\"\".format(\n log_file\n )\n )\n testdir.makepyfile(\n \"\"\"\n # -*- coding: utf-8 -*-\n from __future__ import unicode_literals\n import logging\n\n def test_log_file():\n logging.getLogger('catchlog').info(\"Normal message\")\n logging.getLogger('catchlog').info(\"\u251c\")\n logging.getLogger('catchlog').info(\"Another normal message\")\n \"\"\"\n )\n\n result = testdir.runpytest()\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file, encoding=\"utf-8\") as rfh:\n contents = rfh.read()\n assert \"Normal message\" in contents\n assert u\"\u251c\" in contents\n assert \"Another normal message\" in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logging_suspends_capture_test_live_logging_suspends_capture.assert_out_file_getvalue_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_live_logging_suspends_capture_test_live_logging_suspends_capture.assert_out_file_getvalue_", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 868, "end_line": 910, "span_ids": ["test_live_logging_suspends_capture.DummyTerminal.section", "test_live_logging_suspends_capture", "test_live_logging_suspends_capture.MockCaptureManager:2", "test_live_logging_suspends_capture.DummyTerminal", "test_live_logging_suspends_capture.MockCaptureManager"], "tokens": 331}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"has_capture_manager\", [True, False])\ndef test_live_logging_suspends_capture(has_capture_manager, request):\n \"\"\"Test that capture manager is suspended when we emitting messages for live logging.\n\n This tests the implementation calls instead of behavior because it is difficult/impossible to do it using\n ``testdir`` facilities because they do their own capturing.\n\n We parametrize the test to also make sure _LiveLoggingStreamHandler works correctly if no capture manager plugin\n is installed.\n \"\"\"\n import logging\n import contextlib\n from functools import partial\n from _pytest.logging import _LiveLoggingStreamHandler\n\n class MockCaptureManager:\n calls = []\n\n @contextlib.contextmanager\n def global_and_fixture_disabled(self):\n self.calls.append(\"enter disabled\")\n yield\n self.calls.append(\"exit disabled\")\n\n class DummyTerminal(six.StringIO):\n def section(self, *args, **kwargs):\n pass\n\n 
out_file = DummyTerminal()\n capture_manager = MockCaptureManager() if has_capture_manager else None\n handler = _LiveLoggingStreamHandler(out_file, capture_manager)\n handler.set_when(\"call\")\n\n logger = logging.getLogger(__name__ + \".test_live_logging_suspends_capture\")\n logger.addHandler(handler)\n request.addfinalizer(partial(logger.removeHandler, handler))\n\n logger.critical(\"some message\")\n if has_capture_manager:\n assert MockCaptureManager.calls == [\"enter disabled\", \"exit disabled\"]\n else:\n assert MockCaptureManager.calls == []\n assert out_file.getvalue() == \"\\nsome message\\n\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_collection_live_logging_test_collection_logging_to_file.with_open_log_file_encod.assert_info_message_in_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_collection_live_logging_test_collection_logging_to_file.with_open_log_file_encod.assert_info_message_in_t", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 913, "end_line": 968, "span_ids": ["test_collection_logging_to_file", "test_collection_live_logging"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collection_live_logging(testdir):\n testdir.makepyfile(\n \"\"\"\n import logging\n\n logging.getLogger().info(\"Normal message\")\n \"\"\"\n )\n\n result = testdir.runpytest(\"--log-cli-level=INFO\")\n result.stdout.fnmatch_lines(\n [\n \"collecting*\",\n \"*--- live log collection ---*\",\n \"*Normal message*\",\n \"collected 0 items\",\n ]\n )\n\n\ndef test_collection_logging_to_file(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level = INFO\n \"\"\".format(\n log_file\n )\n )\n\n testdir.makepyfile(\n \"\"\"\n import logging\n\n logging.getLogger().info(\"Normal message\")\n\n def test_simple():\n logging.getLogger().debug(\"debug message in test_simple\")\n logging.getLogger().info(\"info message in test_simple\")\n \"\"\"\n )\n\n result = testdir.runpytest()\n\n assert \"--- live log collection ---\" not in result.stdout.str()\n\n assert result.ret == 0\n assert os.path.isfile(log_file)\n with open(log_file, encoding=\"utf-8\") as rfh:\n contents = rfh.read()\n assert \"Normal message\" in contents\n assert \"debug message in test_simple\" not in contents\n assert \"info message in test_simple\" in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_hooks_test_log_in_hooks.with_open_log_file_as_rf.assert_sessionfinish_in": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_hooks_test_log_in_hooks.with_open_log_file_as_rf.assert_sessionfinish_in", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 971, "end_line": 1004, "span_ids": ["test_log_in_hooks"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_in_hooks(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level = INFO\n log_cli=true\n \"\"\".format(\n log_file\n )\n )\n testdir.makeconftest(\n \"\"\"\n import logging\n\n def pytest_runtestloop(session):\n logging.info('runtestloop')\n\n def pytest_sessionstart(session):\n logging.info('sessionstart')\n\n def pytest_sessionfinish(session, exitstatus):\n logging.info('sessionfinish')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*sessionstart*\", \"*runtestloop*\", \"*sessionfinish*\"])\n with open(log_file) as rfh:\n contents = rfh.read()\n assert \"sessionstart\" in contents\n assert \"runtestloop\" in contents\n assert \"sessionfinish\" in contents", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_runtest_logreport_test_log_in_runtest_logreport.with_open_log_file_as_rf.assert_contents_count_lo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_in_runtest_logreport_test_log_in_runtest_logreport.with_open_log_file_as_rf.assert_contents_count_lo", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 1007, "end_line": 1038, "span_ids": ["test_log_in_runtest_logreport"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_in_runtest_logreport(testdir):\n log_file = testdir.tmpdir.join(\"pytest.log\").strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file={}\n log_file_level = INFO\n log_cli=true\n \"\"\".format(\n log_file\n )\n )\n testdir.makeconftest(\n \"\"\"\n import logging\n logger = logging.getLogger(__name__)\n\n def pytest_runtest_logreport(report):\n logger.info(\"logreport\")\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_first():\n assert True\n \"\"\"\n )\n testdir.runpytest()\n with open(log_file) as rfh:\n contents = rfh.read()\n assert contents.count(\"logreport\") == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_set_path_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/logging/test_reporting.py_test_log_set_path_", "embedding": null, "metadata": {"file_path": "testing/logging/test_reporting.py", "file_name": "test_reporting.py", "file_type": "text/x-python", "category": "test", "start_line": 1041, "end_line": 1087, "span_ids": ["test_log_set_path"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_log_set_path(testdir):\n report_dir_base = testdir.tmpdir.strpath\n\n testdir.makeini(\n \"\"\"\n [pytest]\n log_file_level = DEBUG\n log_cli=true\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import os\n import pytest\n @pytest.hookimpl(hookwrapper=True, tryfirst=True)\n def pytest_runtest_setup(item):\n config = item.config\n logging_plugin = config.pluginmanager.get_plugin(\"logging-plugin\")\n report_file = os.path.join({}, item._request.node.name)\n logging_plugin.set_log_path(report_file)\n yield\n \"\"\".format(\n repr(report_dir_base)\n )\n )\n testdir.makepyfile(\n \"\"\"\n import logging\n logger = logging.getLogger(\"testcase-logger\")\n def test_first():\n logger.info(\"message from test 1\")\n assert True\n\n def test_second():\n logger.debug(\"message from test 2\")\n assert True\n \"\"\"\n )\n testdir.runpytest()\n with open(os.path.join(report_dir_base, \"test_first\"), \"r\") as rfh:\n content = rfh.read()\n assert \"message from test 1\" in content\n\n with open(os.path.join(report_dir_base, \"test_second\"), \"r\") as rfh:\n content = rfh.read()\n assert \"message from test 2\" in content", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py__encoding_utf_8_MyDocTestRunner.report_failure.raise_AssertionError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py__encoding_utf_8_MyDocTestRunner.report_failure.raise_AssertionError_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 25, "span_ids": ["impl", "MyDocTestRunner.report_failure", "docstring", "imports", "MyDocTestRunner"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# encoding: utf-8\nimport doctest\nimport operator\nimport sys\nfrom decimal import Decimal\nfrom fractions import Fraction\nfrom operator import eq\nfrom operator import ne\n\nimport pytest\nfrom pytest import approx\n\ninf, nan = float(\"inf\"), float(\"nan\")\n\n\nclass MyDocTestRunner(doctest.DocTestRunner):\n def __init__(self):\n doctest.DocTestRunner.__init__(self)\n\n def report_failure(self, out, test, example, got):\n raise AssertionError(\n \"'{}' evaluates to '{}', not '{}'\".format(\n 
example.source.strip(), got.strip(), example.want.strip()\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox_TestApprox.test_repr_string.assert_repr_approx_a_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox_TestApprox.test_repr_string.assert_repr_approx_a_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 28, "end_line": 61, "span_ids": ["TestApprox.test_repr_string", "TestApprox.plus_minus", "TestApprox"], "tokens": 500}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n @pytest.fixture\n def plus_minus(self):\n return u\"\\u00b1\" if sys.version_info[0] > 2 else u\"+-\"\n\n def test_repr_string(self, plus_minus):\n tol1, tol2, infr = \"1.0e-06\", \"2.0e-06\", \"inf\"\n assert repr(approx(1.0)) == \"1.0 {pm} {tol1}\".format(pm=plus_minus, tol1=tol1)\n assert repr(\n approx([1.0, 2.0])\n ) == \"approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])\".format(\n pm=plus_minus, tol1=tol1, tol2=tol2\n )\n assert repr(\n approx((1.0, 2.0))\n ) == \"approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))\".format(\n pm=plus_minus, tol1=tol1, tol2=tol2\n )\n assert repr(approx(inf)) == \"inf\"\n assert repr(approx(1.0, rel=nan)) == \"1.0 {pm} ???\".format(pm=plus_minus)\n assert repr(approx(1.0, rel=inf)) == \"1.0 {pm} {infr}\".format(\n pm=plus_minus, infr=infr\n )\n assert repr(approx(1.0j, rel=inf)) == \"1j\"\n\n # Dictionaries aren't ordered, so we need to check both orders.\n assert repr(approx({\"a\": 1.0, \"b\": 2.0})) in (\n \"approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})\".format(\n pm=plus_minus, tol1=tol1, tol2=tol2\n ),\n \"approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})\".format(\n pm=plus_minus, tol1=tol1, tol2=tol2\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_repr_nd_array_TestApprox.test_repr_nd_array.assert_repr_approx_np_arr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_repr_nd_array_TestApprox.test_repr_nd_array.assert_repr_approx_np_arr", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 63, "end_line": 77, "span_ids": ["TestApprox.test_repr_nd_array"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n @pytest.mark.parametrize(\n \"value, repr_string\",\n [\n (5.0, 
\"approx(5.0 {pm} 5.0e-06)\"),\n ([5.0], \"approx([5.0 {pm} 5.0e-06])\"),\n ([[5.0]], \"approx([[5.0 {pm} 5.0e-06]])\"),\n ([[5.0, 6.0]], \"approx([[5.0 {pm} 5.0e-06, 6.0 {pm} 6.0e-06]])\"),\n ([[5.0], [6.0]], \"approx([[5.0 {pm} 5.0e-06], [6.0 {pm} 6.0e-06]])\"),\n ],\n )\n def test_repr_nd_array(self, plus_minus, value, repr_string):\n \"\"\"Make sure that arrays of all different dimensions are repr'd correctly.\"\"\"\n np = pytest.importorskip(\"numpy\")\n np_array = np.array(value)\n assert repr(approx(np_array)) == repr_string.format(pm=plus_minus)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_operator_overloading_TestApprox.test_exactly_equal.for_a_x_in_examples_.assert_a_approx_x_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_operator_overloading_TestApprox.test_exactly_equal.for_a_x_in_examples_.assert_a_approx_x_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 79, "end_line": 97, "span_ids": ["TestApprox.test_exactly_equal", "TestApprox.test_operator_overloading"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_operator_overloading(self):\n assert 1 == approx(1, rel=1e-6, abs=1e-12)\n assert not (1 != approx(1, rel=1e-6, abs=1e-12))\n assert 10 != approx(1, rel=1e-6, abs=1e-12)\n assert not (10 == approx(1, rel=1e-6, abs=1e-12))\n\n def test_exactly_equal(self):\n examples = [\n (2.0, 2.0),\n (0.1e200, 0.1e200),\n (1.123e-300, 1.123e-300),\n (12345, 12345.0),\n (0.0, -0.0),\n (345678, 345678),\n (Decimal(\"1.0001\"), Decimal(\"1.0001\")),\n (Fraction(1, 3), Fraction(-1, -3)),\n ]\n for a, x in examples:\n assert a == approx(x)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_opposite_sign_TestApprox.test_zero_tolerance.for_a_x_in_within_1e10_.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_opposite_sign_TestApprox.test_zero_tolerance.for_a_x_in_within_1e10_.None_5", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 99, "end_line": 112, "span_ids": ["TestApprox.test_zero_tolerance", "TestApprox.test_opposite_sign"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_opposite_sign(self):\n examples = [(eq, 1e-100, -1e-100), (ne, 
1e100, -1e100)]\n for op, a, x in examples:\n assert op(a, approx(x))\n\n def test_zero_tolerance(self):\n within_1e10 = [(1.1e-100, 1e-100), (-1.1e-100, -1e-100)]\n for a, x in within_1e10:\n assert x == approx(x, rel=0.0, abs=0.0)\n assert a != approx(x, rel=0.0, abs=0.0)\n assert a == approx(x, rel=0.0, abs=5e-101)\n assert a != approx(x, rel=0.0, abs=5e-102)\n assert a == approx(x, rel=5e-1, abs=0.0)\n assert a != approx(x, rel=5e-2, abs=0.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_negative_tolerance_TestApprox.test_negative_tolerance.for_kwargs_in_illegal_kwa.with_pytest_raises_ValueE.1_1_approx_1_kwargs": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_negative_tolerance_TestApprox.test_negative_tolerance.for_kwargs_in_illegal_kwa.with_pytest_raises_ValueE.1_1_approx_1_kwargs", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 114, "end_line": 125, "span_ids": ["TestApprox.test_negative_tolerance"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_negative_tolerance(self):\n # Negative tolerances are not allowed.\n illegal_kwargs = [\n dict(rel=-1e100),\n dict(abs=-1e100),\n dict(rel=1e100, abs=-1e100),\n dict(rel=-1e100, abs=1e100),\n dict(rel=-1e100, abs=-1e100),\n ]\n for kwargs in illegal_kwargs:\n with pytest.raises(ValueError):\n 1.1 == approx(1, **kwargs)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_TestApprox.test_inf_tolerance.for_a_x_in_large_diffs_.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_TestApprox.test_inf_tolerance.for_a_x_in_large_diffs_.None_3", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 127, "end_line": 134, "span_ids": ["TestApprox.test_inf_tolerance"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_inf_tolerance(self):\n # Everything should be equal if the tolerance is infinite.\n large_diffs = [(1, 1000), (1e-50, 1e50), (-1.0, -1e300), (0.0, 10)]\n for a, x in large_diffs:\n assert a != approx(x, rel=0.0, abs=0.0)\n assert a == approx(x, rel=inf, abs=0.0)\n assert a == approx(x, rel=0.0, abs=inf)\n assert a == approx(x, rel=inf, abs=inf)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_expecting_zero_TestApprox.test_reasonable_defaults.assert_0_1_0_2_appro": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_inf_tolerance_expecting_zero_TestApprox.test_reasonable_defaults.assert_0_1_0_2_appro", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 136, "end_line": 153, "span_ids": ["TestApprox.test_nan_tolerance", "TestApprox.test_inf_tolerance_expecting_zero", "TestApprox.test_reasonable_defaults"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_inf_tolerance_expecting_zero(self):\n # If the relative tolerance is zero but the expected value is infinite,\n # the actual tolerance is a NaN, which should be an error.\n illegal_kwargs = [dict(rel=inf, abs=0.0), dict(rel=inf, abs=inf)]\n for kwargs in illegal_kwargs:\n with pytest.raises(ValueError):\n 1 == approx(0, **kwargs)\n\n def test_nan_tolerance(self):\n illegal_kwargs = [dict(rel=nan), dict(abs=nan), dict(rel=nan, abs=nan)]\n for kwargs in illegal_kwargs:\n with pytest.raises(ValueError):\n 1.1 == approx(1, **kwargs)\n\n def test_reasonable_defaults(self):\n # Whatever the defaults are, they should work for numbers close to 1\n # than have a small amount of floating-point error.\n assert 0.1 + 0.2 == approx(0.3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_default_tolerances_TestApprox.test_default_tolerances.for_op_a_x_in_examples_.assert_op_a_approx_x_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_default_tolerances_TestApprox.test_default_tolerances.for_op_a_x_in_examples_.assert_op_a_approx_x_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 155, "end_line": 172, "span_ids": ["TestApprox.test_default_tolerances"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_default_tolerances(self):\n # This tests the defaults as they are currently set. 
If you change the\n # defaults, this test will fail but you should feel free to change it.\n # None of the other tests (except the doctests) should be affected by\n # the choice of defaults.\n examples = [\n # Relative tolerance used.\n (eq, 1e100 + 1e94, 1e100),\n (ne, 1e100 + 2e94, 1e100),\n (eq, 1e0 + 1e-6, 1e0),\n (ne, 1e0 + 2e-6, 1e0),\n # Absolute tolerance used.\n (eq, 1e-100, +1e-106),\n (eq, 1e-100, +2e-106),\n (eq, 1e-100, 0),\n ]\n for op, a, x in examples:\n assert op(a, approx(x))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_custom_tolerances_TestApprox.test_custom_tolerances.None_11": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_custom_tolerances_TestApprox.test_custom_tolerances.None_11", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 174, "end_line": 188, "span_ids": ["TestApprox.test_custom_tolerances"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_custom_tolerances(self):\n assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e0)\n assert 1e8 + 1e0 == approx(1e8, rel=5e-9, abs=5e0)\n assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e-1)\n assert 1e8 + 1e0 != approx(1e8, rel=5e-9, abs=5e-1)\n\n assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-8)\n assert 1e0 + 1e-8 == approx(1e0, rel=5e-9, abs=5e-8)\n assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-9)\n assert 1e0 + 1e-8 != approx(1e0, rel=5e-9, abs=5e-9)\n\n assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-16)\n assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-9, abs=5e-16)\n assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-17)\n assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_relative_tolerance_TestApprox.test_relative_tolerance.for_a_x_in_within_1e8_re.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_relative_tolerance_TestApprox.test_relative_tolerance.for_a_x_in_within_1e8_re.None_1", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 190, "end_line": 194, "span_ids": ["TestApprox.test_relative_tolerance"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_relative_tolerance(self):\n within_1e8_rel = [(1e8 + 
1e0, 1e8), (1e0 + 1e-8, 1e0), (1e-8 + 1e-16, 1e-8)]\n for a, x in within_1e8_rel:\n assert a == approx(x, rel=5e-8, abs=0.0)\n assert a != approx(x, rel=5e-9, abs=0.0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_absolute_tolerance_TestApprox.test_absolute_tolerance.for_a_x_in_within_1e8_ab.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_absolute_tolerance_TestApprox.test_absolute_tolerance.for_a_x_in_within_1e8_ab.None_1", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 196, "end_line": 200, "span_ids": ["TestApprox.test_absolute_tolerance"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_absolute_tolerance(self):\n within_1e8_abs = [(1e8 + 9e-9, 1e8), (1e0 + 9e-9, 1e0), (1e-8 + 9e-9, 1e-8)]\n for a, x in within_1e8_abs:\n assert a == approx(x, rel=0, abs=5e-8)\n assert a != approx(x, rel=0, abs=5e-9)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_zero_TestApprox.test_expecting_zero.for_op_a_x_in_examples_.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_zero_TestApprox.test_expecting_zero.for_op_a_x_in_examples_.None_1", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 202, "end_line": 215, "span_ids": ["TestApprox.test_expecting_zero"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_expecting_zero(self):\n examples = [\n (ne, 1e-6, 0.0),\n (ne, -1e-6, 0.0),\n (eq, 1e-12, 0.0),\n (eq, -1e-12, 0.0),\n (ne, 2e-12, 0.0),\n (ne, -2e-12, 0.0),\n (ne, inf, 0.0),\n (ne, nan, 0.0),\n ]\n for op, a, x in examples:\n assert op(a, approx(x, rel=0.0, abs=1e-12))\n assert op(a, approx(x, rel=1e-6, abs=1e-12))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_inf_TestApprox.test_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_x_na": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expecting_inf_TestApprox.test_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_x_na", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 217, "end_line": 241, "span_ids": ["TestApprox.test_expecting_inf", "TestApprox.test_expecting_nan"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_expecting_inf(self):\n examples = [\n (eq, inf, inf),\n (eq, -inf, -inf),\n (ne, inf, -inf),\n (ne, 0.0, inf),\n (ne, nan, inf),\n ]\n for op, a, x in examples:\n assert op(a, approx(x))\n\n def test_expecting_nan(self):\n examples = [\n (eq, nan, nan),\n (eq, -nan, -nan),\n (eq, nan, -nan),\n (ne, 0.0, nan),\n (ne, inf, nan),\n ]\n for op, a, x in examples:\n # Nothing is equal to NaN by default.\n assert a != approx(x)\n\n # If ``nan_ok=True``, then NaN is equal to NaN.\n assert op(a, approx(x, nan_ok=True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_int_TestApprox.test_int.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_int_TestApprox.test_int.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 243, "end_line": 249, "span_ids": ["TestApprox.test_int"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_int(self):\n within_1e6 = [(1000001, 1000000), (-1000001, -1000000)]\n for a, x in within_1e6:\n assert a == approx(x, rel=5e-6, abs=0)\n assert a != approx(x, rel=5e-7, abs=0)\n assert approx(x, rel=5e-6, abs=0) == a\n assert approx(x, rel=5e-7, abs=0) != a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_decimal_TestApprox.test_decimal.for_a_x_in_within_1e6_.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_decimal_TestApprox.test_decimal.for_a_x_in_within_1e6_.None_4", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 261, "span_ids": ["TestApprox.test_decimal"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_decimal(self):\n within_1e6 = [\n (Decimal(\"1.000001\"), Decimal(\"1.0\")),\n (Decimal(\"-1.000001\"), Decimal(\"-1.0\")),\n ]\n for a, x in within_1e6:\n assert a == approx(x)\n assert a == approx(x, rel=Decimal(\"5e-6\"), abs=0)\n assert a != approx(x, rel=Decimal(\"5e-7\"), abs=0)\n assert approx(x, rel=Decimal(\"5e-6\"), abs=0) == a\n assert approx(x, rel=Decimal(\"5e-7\"), abs=0) != a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_fraction_TestApprox.test_fraction.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_fraction_TestApprox.test_fraction.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 263, "end_line": 272, "span_ids": ["TestApprox.test_fraction"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_fraction(self):\n within_1e6 = [\n (1 + Fraction(1, 1000000), Fraction(1)),\n (-1 - Fraction(-1, 1000000), Fraction(-1)),\n ]\n for a, x in within_1e6:\n assert a == approx(x, rel=5e-6, abs=0)\n assert a != approx(x, rel=5e-7, abs=0)\n assert approx(x, rel=5e-6, abs=0) == a\n assert approx(x, rel=5e-7, abs=0) != a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_complex_TestApprox.test_complex.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_complex_TestApprox.test_complex.for_a_x_in_within_1e6_.assert_approx_x_rel_5e_7", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 274, "end_line": 285, "span_ids": ["TestApprox.test_complex"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_complex(self):\n within_1e6 = [\n (1.000001 + 1.0j, 1.0 + 1.0j),\n (1.0 + 1.000001j, 1.0 + 1.0j),\n (-1.000001 + 1.0j, -1.0 + 1.0j),\n (1.0 - 1.000001j, 1.0 - 1.0j),\n ]\n for a, x in within_1e6:\n assert a == approx(x, 
rel=5e-6, abs=0)\n assert a != approx(x, rel=5e-7, abs=0)\n assert approx(x, rel=5e-6, abs=0) == a\n assert approx(x, rel=5e-7, abs=0) != a", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_TestApprox.test_list.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_TestApprox.test_list.None_3", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 287, "end_line": 295, "span_ids": ["TestApprox.test_list"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_list(self):\n actual = [1 + 1e-7, 2 + 1e-8]\n expected = [1, 2]\n\n # Return false if any element is outside the tolerance.\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == actual\n assert approx(expected, rel=5e-8, abs=0) != actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_wrong_len_TestApprox.test_tuple_wrong_len.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_list_wrong_len_TestApprox.test_tuple_wrong_len.None_1", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 297, "end_line": 313, "span_ids": ["TestApprox.test_tuple_wrong_len", "TestApprox.test_list_wrong_len", "TestApprox.test_tuple"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_list_wrong_len(self):\n assert [1, 2] != approx([1])\n assert [1, 2] != approx([1, 2, 3])\n\n def test_tuple(self):\n actual = (1 + 1e-7, 2 + 1e-8)\n expected = (1, 2)\n\n # Return false if any element is outside the tolerance.\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == actual\n assert approx(expected, rel=5e-8, abs=0) != actual\n\n def test_tuple_wrong_len(self):\n assert (1, 2) != approx((1,))\n assert (1, 2) != approx((1, 2, 3))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_dict_TestApprox.test_dict_wrong_len.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_dict_TestApprox.test_dict_wrong_len.None_2", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 315, "end_line": 330, "span_ids": ["TestApprox.test_dict", "TestApprox.test_dict_wrong_len"], "tokens": 260}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_dict(self):\n actual = {\"a\": 1 + 1e-7, \"b\": 2 + 1e-8}\n # Dictionaries became ordered in python3.6, so switch up the order here\n # to make sure it doesn't matter.\n expected = {\"b\": 2, \"a\": 1}\n\n # Return false if any element is outside the tolerance.\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == actual\n assert approx(expected, rel=5e-8, abs=0) != actual\n\n def test_dict_wrong_len(self):\n assert {\"a\": 1, \"b\": 2} != approx({\"a\": 1})\n assert {\"a\": 1, \"b\": 2} != approx({\"a\": 1, \"c\": 2})\n assert {\"a\": 1, \"b\": 2} != approx({\"a\": 1, \"b\": 2, \"c\": 3})", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_TestApprox.test_numpy_array.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_TestApprox.test_numpy_array.None_7", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 332, "end_line": 348, "span_ids": ["TestApprox.test_numpy_array"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_array(self):\n np = pytest.importorskip(\"numpy\")\n\n actual = np.array([1 + 1e-7, 2 + 1e-8])\n expected = np.array([1, 2])\n\n # Return false if any element is outside the tolerance.\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == expected\n assert approx(expected, rel=5e-8, abs=0) != actual\n\n # Should be able to compare lists with numpy arrays.\n assert list(actual) == approx(expected, rel=5e-7, abs=0)\n assert list(actual) != approx(expected, rel=5e-8, abs=0)\n assert actual == approx(list(expected), rel=5e-7, abs=0)\n assert actual != approx(list(expected), rel=5e-8, abs=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_tolerance_args_TestApprox.test_numpy_tolerance_args.for_op__abs__rel_in_tes.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_tolerance_args_TestApprox.test_numpy_tolerance_args.for_op__abs__rel_in_tes.None_5", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 350, "end_line": 378, "span_ids": ["TestApprox.test_numpy_tolerance_args"], "tokens": 333}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_tolerance_args(self):\n \"\"\"\n Check that numpy rel/abs args are handled correctly\n for comparison against an np.array\n Check both sides of the operator, hopefully it doesn't impact things.\n Test all permutations of where the approx and np.array() can show up\n \"\"\"\n np = pytest.importorskip(\"numpy\")\n expected = 100.0\n actual = 99.0\n abs_diff = expected - actual\n rel_diff = (expected - actual) / expected\n\n tests = [\n (eq, abs_diff, 0),\n (eq, 0, rel_diff),\n (ne, 0, rel_diff / 2.0), # rel diff fail\n (ne, abs_diff / 2.0, 0), # abs diff fail\n ]\n\n for op, _abs, _rel in tests:\n assert op(np.array(actual), approx(expected, abs=_abs, rel=_rel)) # a, b\n assert op(approx(expected, abs=_abs, rel=_rel), np.array(actual)) # b, a\n\n assert op(actual, approx(np.array(expected), abs=_abs, rel=_rel)) # a, b\n assert op(approx(np.array(expected), abs=_abs, rel=_rel), actual) # b, a\n\n assert op(np.array(actual), approx(np.array(expected), abs=_abs, rel=_rel))\n assert op(approx(np.array(expected), abs=_abs, rel=_rel), np.array(actual))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_nan_TestApprox.test_numpy_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_np_ar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_nan_TestApprox.test_numpy_expecting_nan.for_op_a_x_in_examples_.assert_op_a_approx_np_ar", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 380, "end_line": 396, "span_ids": ["TestApprox.test_numpy_expecting_nan"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_expecting_nan(self):\n np = pytest.importorskip(\"numpy\")\n examples = [\n (eq, nan, nan),\n (eq, -nan, -nan),\n (eq, nan, -nan),\n (ne, 0.0, nan),\n (ne, inf, nan),\n ]\n for op, a, x in examples:\n # Nothing is 
equal to NaN by default.\n assert np.array(a) != approx(x)\n assert a != approx(np.array(x))\n\n # If ``nan_ok=True``, then NaN is equal to NaN.\n assert op(np.array(a), approx(x, nan_ok=True))\n assert op(a, approx(np.array(x), nan_ok=True))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_inf_TestApprox.test_numpy_expecting_inf.for_op_a_x_in_examples_.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_expecting_inf_TestApprox.test_numpy_expecting_inf.for_op_a_x_in_examples_.None_2", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 398, "end_line": 410, "span_ids": ["TestApprox.test_numpy_expecting_inf"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_expecting_inf(self):\n np = pytest.importorskip(\"numpy\")\n examples = [\n (eq, inf, inf),\n (eq, -inf, -inf),\n (ne, inf, -inf),\n (ne, 0.0, inf),\n (ne, nan, inf),\n ]\n for op, a, x in examples:\n assert op(np.array(a), approx(x))\n assert op(a, approx(np.array(x)))\n assert op(np.array(a), approx(np.array(x)))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_wrong_shape_TestApprox.test_doctests.runner_run_test_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_wrong_shape_TestApprox.test_doctests.runner_run_test_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 412, "end_line": 427, "span_ids": ["TestApprox.test_numpy_array_wrong_shape", "TestApprox.test_doctests"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_array_wrong_shape(self):\n np = pytest.importorskip(\"numpy\")\n\n a12 = np.array([[1, 2]])\n a21 = np.array([[1], [2]])\n\n assert a12 != approx(a21)\n assert a21 != approx(a12)\n\n def test_doctests(self):\n parser = doctest.DocTestParser()\n test = parser.get_doctest(\n approx.__doc__, {\"approx\": approx}, approx.__name__, None, None\n )\n runner = MyDocTestRunner()\n runner.run(test)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_unicode_plus_minus_TestApprox.test_unicode_plus_minus.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_unicode_plus_minus_TestApprox.test_unicode_plus_minus.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 429, "end_line": 445, "span_ids": ["TestApprox.test_unicode_plus_minus"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_unicode_plus_minus(self, testdir):\n \"\"\"\n Comparing approx instances inside lists should not produce an error in the detailed diff.\n Integration test for issue #2111.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_foo():\n assert [3] == [pytest.approx(4)]\n \"\"\"\n )\n expected = \"4.0e-06\"\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*At index 0 diff: 3 != 4 * {}\".format(expected), \"=* 1 failed in *=\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expected_value_type_error_TestApprox.test_comparison_operator_type_error.with_pytest_raises_TypeEr.op_1_approx_1_rel_1e_6_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_expected_value_type_error_TestApprox.test_comparison_operator_type_error.with_pytest_raises_TypeEr.op_1_approx_1_rel_1e_6_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 447, "end_line": 476, "span_ids": ["TestApprox.test_expected_value_type_error", "TestApprox.test_comparison_operator_type_error"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n @pytest.mark.parametrize(\n \"x\",\n [\n pytest.param(None),\n pytest.param(\"string\"),\n pytest.param([\"string\"], id=\"nested-str\"),\n pytest.param([[1]], id=\"nested-list\"),\n pytest.param({\"key\": \"string\"}, id=\"dict-with-string\"),\n pytest.param({\"key\": {\"key\": 1}}, id=\"nested-dict\"),\n ],\n )\n def test_expected_value_type_error(self, x):\n with pytest.raises(TypeError):\n approx(x)\n\n @pytest.mark.parametrize(\n \"op\",\n [\n pytest.param(operator.le, id=\"<=\"),\n pytest.param(operator.lt, id=\"<\"),\n pytest.param(operator.ge, id=\">=\"),\n pytest.param(operator.gt, id=\">\"),\n ],\n )\n def test_comparison_operator_type_error(self, op):\n \"\"\"\n pytest.approx should raise TypeError for operators other than == and != (#2003).\n \"\"\"\n with pytest.raises(TypeError):\n 
op(1, approx(1, rel=1e-6, abs=1e-12))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_with_scalar_TestApprox.test_numpy_array_with_scalar.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_array_with_scalar_TestApprox.test_numpy_array_with_scalar.None_3", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 478, "end_line": 487, "span_ids": ["TestApprox.test_numpy_array_with_scalar"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_array_with_scalar(self):\n np = pytest.importorskip(\"numpy\")\n\n actual = np.array([1 + 1e-7, 1 - 1e-8])\n expected = 1.0\n\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == actual\n assert approx(expected, rel=5e-8, abs=0) != actual", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_scalar_with_array_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/approx.py_TestApprox.test_numpy_scalar_with_array_", "embedding": null, "metadata": {"file_path": "testing/python/approx.py", "file_name": "approx.py", "file_type": "text/x-python", "category": "implementation", "start_line": 489, "end_line": 510, "span_ids": ["TestApprox.test_numpy_scalar_with_array", "TestApprox.test_generic_sized_iterable_object.MySizedIterable.__iter__", "TestApprox.test_generic_sized_iterable_object", "TestApprox.test_generic_sized_iterable_object.MySizedIterable"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestApprox(object):\n\n def test_numpy_scalar_with_array(self):\n np = pytest.importorskip(\"numpy\")\n\n actual = 1.0\n expected = np.array([1 + 1e-7, 1 - 1e-8])\n\n assert actual == approx(expected, rel=5e-7, abs=0)\n assert actual != approx(expected, rel=5e-8, abs=0)\n assert approx(expected, rel=5e-7, abs=0) == actual\n assert approx(expected, rel=5e-8, abs=0) != actual\n\n def test_generic_sized_iterable_object(self):\n class MySizedIterable(object):\n def __iter__(self):\n return iter([1, 2, 3, 4])\n\n def __len__(self):\n return 4\n\n expected = MySizedIterable()\n assert [1, 2, 3, 4] == approx(expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py__coding_utf_8__TestModule.test_import_duplicate.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py__coding_utf_8__TestModule.test_import_duplicate.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 34, "span_ids": ["TestModule.test_import_duplicate", "TestModule.test_failing_import", "docstring", "imports", "TestModule"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport textwrap\n\nimport _pytest._code\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.nodes import Collector\n\n\nclass TestModule(object):\n def test_failing_import(self, testdir):\n modcol = testdir.getmodulecol(\"import alksdjalskdjalkjals\")\n pytest.raises(Collector.CollectError, modcol.collect)\n\n def test_import_duplicate(self, testdir):\n a = testdir.mkdir(\"a\")\n b = testdir.mkdir(\"b\")\n p = a.ensure(\"test_whatever.py\")\n p.pyimport()\n del sys.modules[\"test_whatever\"]\n b.ensure(\"test_whatever.py\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*import*mismatch*\",\n \"*imported*test_whatever*\",\n \"*%s*\" % a.join(\"test_whatever.py\"),\n \"*not the same*\",\n \"*%s*\" % b.join(\"test_whatever.py\"),\n \"*HINT*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_import_prepend_append_TestModule.test_import_prepend_append.with_root2_as_cwd_.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_import_prepend_append_TestModule.test_import_prepend_append.with_root2_as_cwd_.None_1", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 36, "end_line": 58, "span_ids": ["TestModule.test_import_prepend_append"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestModule(object):\n\n def test_import_prepend_append(self, testdir, monkeypatch):\n root1 = testdir.mkdir(\"root1\")\n root2 = testdir.mkdir(\"root2\")\n root1.ensure(\"x456.py\")\n root2.ensure(\"x456.py\")\n p = root2.join(\"test_x456.py\")\n monkeypatch.syspath_prepend(str(root1))\n p.write(\n textwrap.dedent(\n \"\"\"\\\n import x456\n def test():\n assert x456.__file__.startswith({!r})\n \"\"\".format(\n str(root2)\n )\n )\n )\n with root2.as_cwd():\n reprec = testdir.inline_run(\"--import-mode=append\")\n reprec.assertoutcome(passed=0, failed=1)\n reprec = 
testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_syntax_error_in_module_TestModule.test_invalid_test_module_name.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_syntax_error_in_module_TestModule.test_invalid_test_module_name.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 60, "end_line": 78, "span_ids": ["TestModule.test_module_considers_pluginmanager_at_import", "TestModule.test_invalid_test_module_name", "TestModule.test_syntax_error_in_module"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestModule(object):\n\n def test_syntax_error_in_module(self, testdir):\n modcol = testdir.getmodulecol(\"this is a syntax error\")\n pytest.raises(modcol.CollectError, modcol.collect)\n pytest.raises(modcol.CollectError, modcol.collect)\n\n def test_module_considers_pluginmanager_at_import(self, testdir):\n modcol = testdir.getmodulecol(\"pytest_plugins='xasdlkj',\")\n pytest.raises(ImportError, lambda: modcol.obj)\n\n def test_invalid_test_module_name(self, testdir):\n a = testdir.mkdir(\"a\")\n a.ensure(\"test_one.part1.py\")\n result = testdir.runpytest(\"-rw\")\n result.stdout.fnmatch_lines(\n [\n \"ImportError while importing test module*test_one.part1*\",\n \"Hint: make sure your test modules/packages have valid Python names.\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_TestModule.test_show_traceback_import_error.for_name_in__pytest_o.if_verbose_2_.else_.assert_name_not_in_stdout": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_TestModule.test_show_traceback_import_error.for_name_in__pytest_o.if_verbose_2_.else_.assert_name_not_in_stdout", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 114, "span_ids": ["TestModule.test_show_traceback_import_error"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestModule(object):\n\n @pytest.mark.parametrize(\"verbose\", [0, 1, 2])\n def test_show_traceback_import_error(self, testdir, verbose):\n \"\"\"Import errors when collecting modules should display the traceback 
(#1976).\n\n With low verbosity we omit pytest and internal modules, otherwise show all traceback entries.\n \"\"\"\n testdir.makepyfile(\n foo_traceback_import_error=\"\"\"\n from bar_traceback_import_error import NOT_AVAILABLE\n \"\"\",\n bar_traceback_import_error=\"\",\n )\n testdir.makepyfile(\n \"\"\"\n import foo_traceback_import_error\n \"\"\"\n )\n args = (\"-v\",) * verbose\n result = testdir.runpytest(*args)\n result.stdout.fnmatch_lines(\n [\n \"ImportError while importing test module*\",\n \"Traceback:\",\n \"*from bar_traceback_import_error import NOT_AVAILABLE\",\n \"*cannot import name *NOT_AVAILABLE*\",\n ]\n )\n assert result.ret == 2\n\n stdout = result.stdout.str()\n for name in (\"_pytest\", os.path.join(\"py\", \"_path\")):\n if verbose == 2:\n assert name in stdout\n else:\n assert name not in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_unicode_TestModule.test_show_traceback_import_error_unicode.assert_result_ret_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestModule.test_show_traceback_import_error_unicode_TestModule.test_show_traceback_import_error_unicode.assert_result_ret_2", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 116, "end_line": 134, "span_ids": ["TestModule.test_show_traceback_import_error_unicode"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestModule(object):\n\n def test_show_traceback_import_error_unicode(self, testdir):\n \"\"\"Check test modules collected which raise ImportError with unicode messages\n are handled properly (#2336).\n \"\"\"\n testdir.makepyfile(\n u\"\"\"\n # -*- coding: utf-8 -*-\n raise ImportError(u'Something bad happened \u263a')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"ImportError while importing test module*\",\n \"Traceback:\",\n \"*raise ImportError*Something bad happened*\",\n ]\n )\n assert result.ret == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass_TestClass.test_class_subclassobject.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass_TestClass.test_class_subclassobject.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 137, "end_line": 161, "span_ids": ["TestClass", "TestClass.test_class_with_init_warning", "TestClass.test_class_subclassobject"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestClass(object):\n def test_class_with_init_warning(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class TestClass1(object):\n def __init__(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rw\")\n result.stdout.fnmatch_lines(\n [\n \"*cannot collect test class 'TestClass1' because it has a __init__ constructor\"\n ]\n )\n\n def test_class_subclassobject(self, testdir):\n testdir.getmodulecol(\n \"\"\"\n class test(object):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*collected 0*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_static_method_TestClass.test_static_method.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_static_method_TestClass.test_static_method.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 163, "end_line": 183, "span_ids": ["TestClass.test_static_method"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestClass(object):\n\n def test_static_method(self, testdir):\n \"\"\"Support for collecting staticmethod tests (#2528, #2699)\"\"\"\n testdir.getmodulecol(\n \"\"\"\n import pytest\n class Test(object):\n @staticmethod\n def test_something():\n pass\n\n @pytest.fixture\n def fix(self):\n return 1\n\n @staticmethod\n def test_fix(fix):\n assert fix == 1\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*collected 2 items*\", \"*2 passed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_setup_teardown_class_as_classmethod_TestClass.test_issue2234_property.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestClass.test_setup_teardown_class_as_classmethod_TestClass.test_issue2234_property.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 185, "end_line": 238, "span_ids": ["TestClass.test_issue2234_property", "TestClass.test_issue1579_namedtuple", "TestClass.test_issue1035_obj_has_getattr", "TestClass.test_setup_teardown_class_as_classmethod"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestClass(object):\n\n def test_setup_teardown_class_as_classmethod(self, testdir):\n testdir.makepyfile(\n test_mod1=\"\"\"\n class TestClassMethod(object):\n @classmethod\n def setup_class(cls):\n pass\n def test_1(self):\n pass\n @classmethod\n def teardown_class(cls):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_issue1035_obj_has_getattr(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n class Chameleon(object):\n def __getattr__(self, name):\n return True\n chameleon = Chameleon()\n \"\"\"\n )\n colitems = modcol.collect()\n assert len(colitems) == 0\n\n def test_issue1579_namedtuple(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import collections\n\n TestCase = collections.namedtuple('TestCase', ['a'])\n \"\"\"\n )\n result = testdir.runpytest(\"-rw\")\n result.stdout.fnmatch_lines(\n \"*cannot collect test class 'TestCase' \"\n \"because it has a __new__ constructor*\"\n )\n\n def test_issue2234_property(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class TestCase(object):\n @property\n def prop(self):\n raise NotImplementedError()\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction_TestFunction.test_function_as_object_instance_ignored.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction_TestFunction.test_function_as_object_instance_ignored.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 241, "end_line": 266, "span_ids": ["TestFunction", "TestFunction.test_getmodulecollector", "TestFunction.test_function_as_object_instance_ignored"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n def test_getmodulecollector(self, testdir):\n item = testdir.getitem(\"def test_func(): pass\")\n modcol = item.getparent(pytest.Module)\n assert isinstance(modcol, pytest.Module)\n assert hasattr(modcol.obj, \"test_func\")\n\n @pytest.mark.filterwarnings(\"default\")\n def test_function_as_object_instance_ignored(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class A(object):\n def __call__(self, tmpdir):\n 0/0\n\n test_a = A()\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"collected 0 items\",\n \"*test_function_as_object_instance_ignored.py:2: \"\n \"*cannot collect 'test_a' because it is not a function.\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.make_function_TestFunction.test_issue213_parametrize_value_no_equal.reprec_assertoutcome_pass": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.make_function_TestFunction.test_issue213_parametrize_value_no_equal.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 268, "end_line": 333, "span_ids": ["TestFunction.test_single_tuple_unwraps_values", "TestFunction.test_function_equality", "TestFunction.test_repr_produces_actual_test_id", "TestFunction.test_issue213_parametrize_value_no_equal", "TestFunction.test_issue197_parametrize_emptyset", "TestFunction.make_function"], "tokens": 472}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n @staticmethod\n def make_function(testdir, **kwargs):\n from _pytest.fixtures import FixtureManager\n\n config = testdir.parseconfigure()\n session = testdir.Session(config)\n session._fixturemanager = FixtureManager(session)\n\n return pytest.Function(config=config, parent=session, **kwargs)\n\n def test_function_equality(self, testdir, tmpdir):\n def func1():\n pass\n\n def func2():\n pass\n\n f1 = self.make_function(testdir, name=\"name\", args=(1,), callobj=func1)\n assert f1 == f1\n f2 = self.make_function(testdir, name=\"name\", callobj=func2)\n assert f1 != f2\n\n def test_repr_produces_actual_test_id(self, testdir):\n f = self.make_function(\n testdir, name=r\"test[\\xe5]\", callobj=self.test_repr_produces_actual_test_id\n )\n assert repr(f) == r\"\"\n\n def test_issue197_parametrize_emptyset(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('arg', [])\n def test_function(arg):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=1)\n\n def test_single_tuple_unwraps_values(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize(('arg',), [(1,)])\n def test_function(arg):\n assert arg == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n def test_issue213_parametrize_value_no_equal(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n class A(object):\n def __eq__(self, other):\n raise ValueError(\"not possible\")\n @pytest.mark.parametrize('arg', [A()])\n def test_function(arg):\n assert arg.__class__.__name__ == \"A\"\n \"\"\"\n )\n reprec = testdir.inline_run(\"--fulltrace\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_TestFunction.test_parametrize_with_non_hashable_values.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_TestFunction.test_parametrize_with_non_hashable_values.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 335, "end_line": 353, 
"span_ids": ["TestFunction.test_parametrize_with_non_hashable_values"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_with_non_hashable_values(self, testdir):\n \"\"\"Test parametrization with non-hashable values.\"\"\"\n testdir.makepyfile(\n \"\"\"\n archival_mapping = {\n '1.0': {'tag': '1.0'},\n '1.2.2a1': {'tag': 'release-1.2.2a1'},\n }\n\n import pytest\n @pytest.mark.parametrize('key value'.split(),\n archival_mapping.items())\n def test_archival_to_version(key, value):\n assert key in archival_mapping\n assert value == archival_mapping[key]\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_indirect_TestFunction.test_parametrize_with_non_hashable_values_indirect.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_non_hashable_values_indirect_TestFunction.test_parametrize_with_non_hashable_values_indirect.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 355, "end_line": 382, "span_ids": ["TestFunction.test_parametrize_with_non_hashable_values_indirect"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_with_non_hashable_values_indirect(self, testdir):\n \"\"\"Test parametrization with non-hashable values with indirect parametrization.\"\"\"\n testdir.makepyfile(\n \"\"\"\n archival_mapping = {\n '1.0': {'tag': '1.0'},\n '1.2.2a1': {'tag': 'release-1.2.2a1'},\n }\n\n import pytest\n\n @pytest.fixture\n def key(request):\n return request.param\n\n @pytest.fixture\n def value(request):\n return request.param\n\n @pytest.mark.parametrize('key value'.split(),\n archival_mapping.items(), indirect=True)\n def test_archival_to_version(key, value):\n assert key in archival_mapping\n assert value == archival_mapping[key]\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_fixture_TestFunction.test_parametrize_overrides_fixture.rec_assertoutcome_passed_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_fixture_TestFunction.test_parametrize_overrides_fixture.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 384, "end_line": 411, "span_ids": ["TestFunction.test_parametrize_overrides_fixture"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_overrides_fixture(self, testdir):\n \"\"\"Test parametrization when parameter overrides existing fixture with same name.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def value():\n return 'value'\n\n @pytest.mark.parametrize('value',\n ['overridden'])\n def test_overridden_via_param(value):\n assert value == 'overridden'\n\n @pytest.mark.parametrize('somevalue', ['overridden'])\n def test_not_overridden(value, somevalue):\n assert value == 'value'\n assert somevalue == 'overridden'\n\n @pytest.mark.parametrize('other,value', [('foo', 'overridden')])\n def test_overridden_via_multiparam(other, value):\n assert other == 'foo'\n assert value == 'overridden'\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_parametrized_fixture_TestFunction.test_parametrize_overrides_parametrized_fixture.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_parametrized_fixture_TestFunction.test_parametrize_overrides_parametrized_fixture.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 413, "end_line": 430, "span_ids": ["TestFunction.test_parametrize_overrides_parametrized_fixture"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_overrides_parametrized_fixture(self, testdir):\n \"\"\"Test parametrization when parameter overrides existing parametrized fixture with same name.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1, 2])\n def value(request):\n return request.param\n\n @pytest.mark.parametrize('value',\n ['overridden'])\n def test_overridden_via_param(value):\n assert value == 'overridden'\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_indirect_dependency_fixture_TestFunction.test_parametrize_overrides_indirect_dependency_fixture.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_overrides_indirect_dependency_fixture_TestFunction.test_parametrize_overrides_indirect_dependency_fixture.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 432, "end_line": 461, "span_ids": ["TestFunction.test_parametrize_overrides_indirect_dependency_fixture"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_overrides_indirect_dependency_fixture(self, testdir):\n \"\"\"Test parametrization when parameter overrides a fixture that a test indirectly depends on\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n fix3_instantiated = False\n\n @pytest.fixture\n def fix1(fix2):\n return fix2 + '1'\n\n @pytest.fixture\n def fix2(fix3):\n return fix3 + '2'\n\n @pytest.fixture\n def fix3():\n global fix3_instantiated\n fix3_instantiated = True\n return '3'\n\n @pytest.mark.parametrize('fix2', ['2'])\n def test_it(fix1):\n assert fix1 == '21'\n assert not fix3_instantiated\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_mark_TestFunction.test_parametrize_with_mark.assert_foo_in_keywords_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_with_mark_TestFunction.test_parametrize_with_mark.assert_foo_in_keywords_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 463, "end_line": 482, "span_ids": ["TestFunction.test_parametrize_with_mark"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_parametrize_with_mark(self, testdir):\n items = testdir.getitems(\n \"\"\"\n import pytest\n @pytest.mark.foo\n @pytest.mark.parametrize('arg', [\n 1,\n pytest.param(2, marks=[pytest.mark.baz, pytest.mark.bar])\n ])\n def test_function(arg):\n pass\n \"\"\"\n )\n keywords = [item.keywords for item in items]\n assert (\n \"foo\" in keywords[0]\n and \"bar\" not in keywords[0]\n and \"baz\" not in keywords[0]\n )\n assert \"foo\" in keywords[1] and \"bar\" in keywords[1] and \"baz\" in keywords[1]", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_function_equality_with_callspec_TestFunction.test_pyfunc_call.config_hook_pytest_pyfunc": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_function_equality_with_callspec_TestFunction.test_pyfunc_call.config_hook_pytest_pyfunc", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 484, "end_line": 511, "span_ids": ["TestFunction.test_pyfunc_call.MyPlugin1.pytest_pyfunc_call", "TestFunction.test_pyfunc_call.MyPlugin2", "TestFunction.test_pyfunc_call.MyPlugin1", "TestFunction.test_pyfunc_call", "TestFunction.test_pyfunc_call.MyPlugin2.pytest_pyfunc_call", "TestFunction.test_function_equality_with_callspec"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_function_equality_with_callspec(self, testdir, tmpdir):\n items = testdir.getitems(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('arg', [1,2])\n def test_function(arg):\n pass\n \"\"\"\n )\n assert items[0] != items[1]\n assert not (items[0] == items[1])\n\n def test_pyfunc_call(self, testdir):\n item = testdir.getitem(\"def test_func(): raise ValueError\")\n config = item.config\n\n class MyPlugin1(object):\n def pytest_pyfunc_call(self, pyfuncitem):\n raise ValueError\n\n class MyPlugin2(object):\n def pytest_pyfunc_call(self, pyfuncitem):\n return True\n\n config.pluginmanager.register(MyPlugin1())\n config.pluginmanager.register(MyPlugin2())\n config.hook.pytest_runtest_setup(item=item)\n config.hook.pytest_pyfunc_call(pyfuncitem=item)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_multiple_parametrize_TestFunction.test_multiple_parametrize.assert_colitems_3_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_multiple_parametrize_TestFunction.test_multiple_parametrize.assert_colitems_3_name_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 513, "end_line": 527, "span_ids": ["TestFunction.test_multiple_parametrize"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_multiple_parametrize(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('x', [0, 1])\n 
@pytest.mark.parametrize('y', [2, 3])\n def test1(x, y):\n pass\n \"\"\"\n )\n colitems = modcol.collect()\n assert colitems[0].name == \"test1[2-0]\"\n assert colitems[1].name == \"test1[2-1]\"\n assert colitems[2].name == \"test1[3-0]\"\n assert colitems[3].name == \"test1[3-1]\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_issue751_multiple_parametrize_with_ids_TestFunction.test_issue751_multiple_parametrize_with_ids.assert_colitems_3_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_issue751_multiple_parametrize_with_ids_TestFunction.test_issue751_multiple_parametrize_with_ids.assert_colitems_3_name_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 529, "end_line": 546, "span_ids": ["TestFunction.test_issue751_multiple_parametrize_with_ids"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def test_issue751_multiple_parametrize_with_ids(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('x', [0], ids=['c'])\n @pytest.mark.parametrize('y', [0, 1], ids=['a', 'b'])\n class Test(object):\n def test1(self, x, y):\n pass\n def test2(self, x, y):\n pass\n \"\"\"\n )\n colitems = modcol.collect()[0].collect()[0].collect()\n assert colitems[0].name == \"test1[a-c]\"\n assert colitems[1].name == \"test1[b-c]\"\n assert colitems[2].name == \"test2[a-c]\"\n assert colitems[3].name == \"test2[b-c]\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_skipif_TestFunction.test_function_original_name.assert_x_originalname_fo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestFunction.test_parametrize_skipif_TestFunction.test_function_original_name.assert_x_originalname_fo", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 548, "end_line": 647, "span_ids": ["TestFunction.test_parametrize_xfail_passed", "TestFunction.test_parametrize_skipif", "TestFunction.test_parametrize_xfail", "TestFunction.test_function_original_name", "TestFunction.test_parametrize_skip", "TestFunction.test_parametrize_skipif_no_skip", "TestFunction.test_parametrize_passed"], "tokens": 663}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunction(object):\n\n def 
test_parametrize_skipif(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.skipif('True')\n\n @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_skip_if(x):\n assert x < 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed, 1 skipped in *\"])\n\n def test_parametrize_skip(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.skip('')\n\n @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_skip(x):\n assert x < 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed, 1 skipped in *\"])\n\n def test_parametrize_skipif_no_skip(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.skipif('False')\n\n @pytest.mark.parametrize('x', [0, 1, m(2)])\n def test_skipif_no_skip(x):\n assert x < 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 failed, 2 passed in *\"])\n\n def test_parametrize_xfail(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.xfail('True')\n\n @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_xfail(x):\n assert x < 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed, 1 xfailed in *\"])\n\n def test_parametrize_passed(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.xfail('True')\n\n @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)])\n def test_xfail(x):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed, 1 xpassed in *\"])\n\n def test_parametrize_xfail_passed(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n m = pytest.mark.xfail('False')\n\n @pytest.mark.parametrize('x', [0, 1, m(2)])\n def test_passed(x):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 3 passed in *\"])\n\n def test_function_original_name(self, testdir):\n items = testdir.getitems(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('arg', [1,2])\n def test_func(arg):\n pass\n \"\"\"\n )\n assert [x.originalname for x in items] == [\"test_func\", \"test_func\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting_TestSorting.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting_TestSorting.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 650, "end_line": 679, "span_ids": ["TestSorting", "TestSorting.test_check_equality"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSorting(object):\n def test_check_equality(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n def test_pass(): pass\n def test_fail(): assert 0\n \"\"\"\n )\n fn1 
= testdir.collect_by_name(modcol, \"test_pass\")\n assert isinstance(fn1, pytest.Function)\n fn2 = testdir.collect_by_name(modcol, \"test_pass\")\n assert isinstance(fn2, pytest.Function)\n\n assert fn1 == fn2\n assert fn1 != modcol\n if sys.version_info < (3, 0):\n assert cmp(fn1, fn2) == 0 # NOQA\n assert hash(fn1) == hash(fn2)\n\n fn3 = testdir.collect_by_name(modcol, \"test_fail\")\n assert isinstance(fn3, pytest.Function)\n assert not (fn1 == fn3)\n assert fn1 != fn3\n\n for fn in fn1, fn2, fn3:\n assert fn != 3\n assert fn != modcol\n assert fn != [1, 2, 3]\n assert [1, 2, 3] != fn\n assert modcol != fn", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting.test_allow_sane_sorting_for_decorators_TestSorting.test_allow_sane_sorting_for_decorators.assert_item_name_for_ite": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestSorting.test_allow_sane_sorting_for_decorators_TestSorting.test_allow_sane_sorting_for_decorators.assert_item_name_for_ite", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 681, "end_line": 701, "span_ids": ["TestSorting.test_allow_sane_sorting_for_decorators"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSorting(object):\n\n def test_allow_sane_sorting_for_decorators(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n def dec(f):\n g = lambda: f(2)\n g.place_as = f\n return g\n\n\n def test_b(y):\n pass\n test_b = dec(test_b)\n\n def test_a(y):\n pass\n test_a = dec(test_a)\n \"\"\"\n )\n colitems = modcol.collect()\n assert len(colitems) == 2\n assert [item.name for item in colitems] == [\"test_b\", \"test_a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization_TestConftestCustomization.test_pytest_pycollect_module.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestConftestCustomization_TestConftestCustomization.test_pytest_pycollect_module.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 704, "end_line": 719, "span_ids": ["TestConftestCustomization", "TestConftestCustomization.test_pytest_pycollect_module"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConftestCustomization(object):\n def test_pytest_pycollect_module(self, testdir):\n 
testdir.makeconftest(\n \"\"\"\n import pytest\n class MyModule(pytest.Module):\n pass\n def pytest_pycollect_makemodule(path, parent):\n if path.basename == \"test_xyz.py\":\n return MyModule(path, parent)\n \"\"\"\n )\n testdir.makepyfile(\"def test_some(): pass\")\n testdir.makepyfile(test_xyz=\"def test_func(): pass\")\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"* 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_error_during_import_TestTracebackCutting.test_traceback_error_during_import.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_error_during_import_TestTracebackCutting.test_traceback_error_during_import.None_2", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 911, "end_line": 930, "span_ids": ["TestTracebackCutting.test_traceback_error_during_import"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTracebackCutting(object):\n\n def test_traceback_error_during_import(self, testdir):\n testdir.makepyfile(\n \"\"\"\n x = 1\n x = 2\n x = 17\n asd\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n out = result.stdout.str()\n assert \"x = 1\" not in out\n assert \"x = 2\" not in out\n result.stdout.fnmatch_lines([\" *asd*\", \"E*NameError*\"])\n result = testdir.runpytest(\"--fulltrace\")\n out = result.stdout.str()\n assert \"x = 1\" in out\n assert \"x = 2\" in out\n result.stdout.fnmatch_lines([\">*asd*\", \"E*NameError*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection_TestTracebackCutting.test_traceback_filter_error_during_fixture_collection.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 932, "end_line": 957, "span_ids": ["TestTracebackCutting.test_traceback_filter_error_during_fixture_collection"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTracebackCutting(object):\n\n def 
test_traceback_filter_error_during_fixture_collection(self, testdir):\n \"\"\"integration test for issue #995.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def fail_me(func):\n ns = {}\n exec('def w(): raise ValueError(\"fail me\")', ns)\n return ns['w']\n\n @pytest.fixture(scope='class')\n @fail_me\n def fail_fixture():\n pass\n\n def test_failing_fixture(fail_fixture):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n out = result.stdout.str()\n assert \"INTERNALERROR>\" not in out\n result.stdout.fnmatch_lines([\"*ValueError: fail me*\", \"* 1 error in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_generated_code_TestTracebackCutting.test_filter_traceback_generated_code.assert_not_filter_traceba": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_generated_code_TestTracebackCutting.test_filter_traceback_generated_code.assert_not_filter_traceba", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 959, "end_line": 978, "span_ids": ["TestTracebackCutting.test_filter_traceback_generated_code"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTracebackCutting(object):\n\n def test_filter_traceback_generated_code(self):\n \"\"\"test that filter_traceback() works with the fact that\n _pytest._code.code.Code.path attribute might return an str object.\n In this case, one of the entries on the traceback was produced by\n dynamically generated code.\n See: https://bitbucket.org/pytest-dev/py/issues/71\n This fixes #995.\n \"\"\"\n from _pytest.python import filter_traceback\n\n try:\n ns = {}\n exec(\"def foo(): raise ValueError\", ns)\n ns[\"foo\"]()\n except ValueError:\n _, _, tb = sys.exc_info()\n\n tb = _pytest._code.Traceback(tb)\n assert isinstance(tb[-1].path, str)\n assert not filter_traceback(tb[-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_path_no_longer_valid_TestTracebackCutting.test_filter_traceback_path_no_longer_valid.assert_filter_traceback_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestTracebackCutting.test_filter_traceback_path_no_longer_valid_TestTracebackCutting.test_filter_traceback_path_no_longer_valid.assert_filter_traceback_t", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 980, "end_line": 1005, "span_ids": ["TestTracebackCutting.test_filter_traceback_path_no_longer_valid"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTracebackCutting(object):\n\n def test_filter_traceback_path_no_longer_valid(self, testdir):\n \"\"\"test that filter_traceback() works with the fact that\n _pytest._code.code.Code.path attribute might return an str object.\n In this case, one of the files in the traceback no longer exists.\n This fixes #1133.\n \"\"\"\n from _pytest.python import filter_traceback\n\n testdir.syspathinsert()\n testdir.makepyfile(\n filter_traceback_entry_as_str=\"\"\"\n def foo():\n raise ValueError\n \"\"\"\n )\n try:\n import filter_traceback_entry_as_str\n\n filter_traceback_entry_as_str.foo()\n except ValueError:\n _, _, tb = sys.exc_info()\n\n testdir.tmpdir.join(\"filter_traceback_entry_as_str.py\").remove()\n tb = _pytest._code.Traceback(tb)\n assert isinstance(tb[-1].path, str)\n assert filter_traceback(tb[-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo_TestReportInfo.test_itemreport_reportinfo.assert_item_location_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo_TestReportInfo.test_itemreport_reportinfo.assert_item_location_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1008, "end_line": 1023, "span_ids": ["TestReportInfo", "TestReportInfo.test_itemreport_reportinfo"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportInfo(object):\n def test_itemreport_reportinfo(self, testdir, linecomp):\n testdir.makeconftest(\n \"\"\"\n import pytest\n class MyFunction(pytest.Function):\n def reportinfo(self):\n return \"ABCDE\", 42, \"custom\"\n def pytest_pycollect_makeitem(collector, name, obj):\n if name == \"test_func\":\n return MyFunction(name, parent=collector)\n \"\"\"\n )\n item = testdir.getitem(\"def test_func(): pass\")\n item.config.pluginmanager.getplugin(\"runner\")\n assert item.location == (\"ABCDE\", 42, \"custom\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_func_reportinfo_TestReportInfo.test_class_reportinfo.assert_msg_TestClass_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_func_reportinfo_TestReportInfo.test_class_reportinfo.assert_msg_TestClass_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1025, "end_line": 1044, "span_ids": ["TestReportInfo.test_class_reportinfo", 
"TestReportInfo.test_func_reportinfo"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportInfo(object):\n\n def test_func_reportinfo(self, testdir):\n item = testdir.getitem(\"def test_func(): pass\")\n fspath, lineno, modpath = item.reportinfo()\n assert fspath == item.fspath\n assert lineno == 0\n assert modpath == \"test_func\"\n\n def test_class_reportinfo(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n # lineno 0\n class TestClass(object):\n def test_hello(self): pass\n \"\"\"\n )\n classcol = testdir.collect_by_name(modcol, \"TestClass\")\n fspath, lineno, msg = classcol.reportinfo()\n assert fspath == modcol.fspath\n assert lineno == 1\n assert msg == \"TestClass\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_reportinfo_with_nasty_getattr_TestReportInfo.test_reportinfo_with_nasty_getattr.fspath_lineno_msg_ins": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_TestReportInfo.test_reportinfo_with_nasty_getattr_TestReportInfo.test_reportinfo_with_nasty_getattr.fspath_lineno_msg_ins", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1046, "end_line": 1064, "span_ids": ["TestReportInfo.test_reportinfo_with_nasty_getattr"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportInfo(object):\n\n @pytest.mark.filterwarnings(\n \"ignore:usage of Generator.Function is deprecated, please use pytest.Function instead\"\n )\n def test_reportinfo_with_nasty_getattr(self, testdir):\n # https://github.com/pytest-dev/pytest/issues/1204\n modcol = testdir.getmodulecol(\n \"\"\"\n # lineno 0\n class TestClass(object):\n def __getattr__(self, name):\n return \"this is not an int\"\n\n def test_foo(self):\n pass\n \"\"\"\n )\n classcol = testdir.collect_by_name(modcol, \"TestClass\")\n instance = classcol.collect()[0]\n fspath, lineno, msg = instance.reportinfo()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_test_customized_python_discovery.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_test_customized_python_discovery.None_3", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1067, "end_line": 1094, "span_ids": ["test_customized_python_discovery"], "tokens": 
183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_customized_python_discovery(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n python_files=check_*.py\n python_classes=Check\n python_functions=check\n \"\"\"\n )\n p = testdir.makepyfile(\n \"\"\"\n def check_simple():\n pass\n class CheckMyApp(object):\n def check_meth(self):\n pass\n \"\"\"\n )\n p2 = p.new(basename=p.basename.replace(\"test\", \"check\"))\n p.move(p2)\n result = testdir.runpytest(\"--collect-only\", \"-s\")\n result.stdout.fnmatch_lines(\n [\"*check_customized*\", \"*check_simple*\", \"*CheckMyApp*\", \"*check_meth*\"]\n )\n\n result = testdir.runpytest()\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_functions_test_unorderable_types.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_customized_python_discovery_functions_test_unorderable_types.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1097, "end_line": 1134, "span_ids": ["test_customized_python_discovery_functions", "test_unorderable_types"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_customized_python_discovery_functions(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n python_functions=_test\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def _test_underscore():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\", \"-s\")\n result.stdout.fnmatch_lines([\"*_test_underscore*\"])\n\n result = testdir.runpytest()\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n\ndef test_unorderable_types(testdir):\n testdir.makepyfile(\n \"\"\"\n class TestJoinEmpty(object):\n pass\n\n def make_test():\n class Test(object):\n pass\n Test.__name__ = \"TestFoo\"\n return Test\n TestFoo = make_test()\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"TypeError\" not in result.stdout.str()\n assert result.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_collect_functools_partial_test_collect_functools_partial.result_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_collect_functools_partial_test_collect_functools_partial.result_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", 
"file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1137, "end_line": 1180, "span_ids": ["test_collect_functools_partial"], "tokens": 332}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_functools_partial(testdir):\n \"\"\"\n Test that collection of functools.partial object works, and arguments\n to the wrapped functions are dealt correctly (see #811).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import functools\n import pytest\n\n @pytest.fixture\n def fix1():\n return 'fix1'\n\n @pytest.fixture\n def fix2():\n return 'fix2'\n\n def check1(i, fix1):\n assert i == 2\n assert fix1 == 'fix1'\n\n def check2(fix1, i):\n assert i == 2\n assert fix1 == 'fix1'\n\n def check3(fix1, i, fix2):\n assert i == 2\n assert fix1 == 'fix1'\n assert fix2 == 'fix2'\n\n test_ok_1 = functools.partial(check1, i=2)\n test_ok_2 = functools.partial(check1, i=2, fix1='fix1')\n test_ok_3 = functools.partial(check1, 2)\n test_ok_4 = functools.partial(check2, i=2)\n test_ok_5 = functools.partial(check3, i=2)\n test_ok_6 = functools.partial(check3, i=2, fix1='fix1')\n\n test_fail_1 = functools.partial(check2, 2)\n test_fail_2 = functools.partial(check3, 2)\n \"\"\"\n )\n result = testdir.inline_run()\n result.assertoutcome(passed=6, failed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_dont_collect_non_function_callable_test_dont_collect_non_function_callable.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_dont_collect_non_function_callable_test_dont_collect_non_function_callable.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1183, "end_line": 1209, "span_ids": ["test_dont_collect_non_function_callable"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_dont_collect_non_function_callable(testdir):\n \"\"\"Test for issue https://github.com/pytest-dev/pytest/issues/331\n\n In this case an INTERNALERROR occurred trying to report the failure of\n a test like this one because py test failed to get the source lines.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n class Oh(object):\n def __call__(self):\n pass\n\n test_a = Oh()\n\n def test_real():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rw\")\n result.stdout.fnmatch_lines(\n [\n \"*collected 1 item*\",\n \"*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*\",\n \"*1 passed, 1 warnings in *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_class_injection_does_not_break_collection_test_class_injection_does_not_break_collection.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_class_injection_does_not_break_collection_test_class_injection_does_not_break_collection.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1212, "end_line": 1239, "span_ids": ["test_class_injection_does_not_break_collection"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_class_injection_does_not_break_collection(testdir):\n \"\"\"Tests whether injection during collection time will terminate testing.\n\n In this case the error should not occur if the TestClass itself\n is modified during collection time, and the original method list\n is still used for collection.\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n from test_inject import TestClass\n def pytest_generate_tests(metafunc):\n TestClass.changed_var = {}\n \"\"\"\n )\n testdir.makepyfile(\n test_inject='''\n class TestClass(object):\n def test_injection(self):\n \"\"\"Test being parametrized.\"\"\"\n pass\n '''\n )\n result = testdir.runpytest()\n assert (\n \"RuntimeError: dictionary changed size during iteration\"\n not in result.stdout.str()\n )\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_syntax_error_with_non_ascii_chars_test_skip_duplicates_by_default.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_syntax_error_with_non_ascii_chars_test_skip_duplicates_by_default.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1242, "end_line": 1273, "span_ids": ["test_syntax_error_with_non_ascii_chars", "test_skip_duplicates_by_default"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_syntax_error_with_non_ascii_chars(testdir):\n \"\"\"Fix decoding issue while formatting SyntaxErrors during collection (#578)\n \"\"\"\n testdir.makepyfile(\n u\"\"\"\n # -*- coding: UTF-8 -*-\n\n \u2603\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*ERROR collecting*\", \"*SyntaxError*\", \"*1 error in*\"])\n\n\ndef test_skip_duplicates_by_default(testdir):\n \"\"\"Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)\n\n 
Ignore duplicate directories.\n \"\"\"\n a = testdir.mkdir(\"a\")\n fh = a.join(\"test_a.py\")\n fh.write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n def test_real():\n pass\n \"\"\"\n )\n )\n result = testdir.runpytest(a.strpath, a.strpath)\n result.stdout.fnmatch_lines([\"*collected 1 item*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_keep_duplicates_test_keep_duplicates.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_keep_duplicates_test_keep_duplicates.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1276, "end_line": 1293, "span_ids": ["test_keep_duplicates"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_keep_duplicates(testdir):\n \"\"\"Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609)\n\n Use --keep-duplicates to collect tests from duplicate directories.\n \"\"\"\n a = testdir.mkdir(\"a\")\n fh = a.join(\"test_a.py\")\n fh.write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n def test_real():\n pass\n \"\"\"\n )\n )\n result = testdir.runpytest(\"--keep-duplicates\", a.strpath, a.strpath)\n result.stdout.fnmatch_lines([\"*collected 2 item*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_collection_infinite_recursion_test_package_collection_init_given_as_argument.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_collection_infinite_recursion_test_package_collection_init_given_as_argument.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1296, "end_line": 1306, "span_ids": ["test_package_collection_infinite_recursion", "test_package_collection_init_given_as_argument"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_package_collection_infinite_recursion(testdir):\n testdir.copy_example(\"collect/package_infinite_recursion\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n\ndef test_package_collection_init_given_as_argument(testdir):\n \"\"\"Regression test for #3749\"\"\"\n p = testdir.copy_example(\"collect/package_init_given_as_arg\")\n result = testdir.runpytest(p / \"pkg\" / \"__init__.py\")\n result.stdout.fnmatch_lines([\"*1 
passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_with_modules_test_package_with_modules.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_with_modules_test_package_with_modules.None_7", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1309, "end_line": 1346, "span_ids": ["test_package_with_modules"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_package_with_modules(testdir):\n \"\"\"\n .\n \u2514\u2500\u2500 root\n \u251c\u2500\u2500 __init__.py\n \u251c\u2500\u2500 sub1\n \u2502 \u251c\u2500\u2500 __init__.py\n \u2502 \u2514\u2500\u2500 sub1_1\n \u2502 \u251c\u2500\u2500 __init__.py\n \u2502 \u2514\u2500\u2500 test_in_sub1.py\n \u2514\u2500\u2500 sub2\n \u2514\u2500\u2500 test\n \u2514\u2500\u2500 test_in_sub2.py\n\n \"\"\"\n root = testdir.mkpydir(\"root\")\n sub1 = root.mkdir(\"sub1\")\n sub1.ensure(\"__init__.py\")\n sub1_test = sub1.mkdir(\"sub1_1\")\n sub1_test.ensure(\"__init__.py\")\n sub2 = root.mkdir(\"sub2\")\n sub2_test = sub2.mkdir(\"sub2\")\n\n sub1_test.join(\"test_in_sub1.py\").write(\"def test_1(): pass\")\n sub2_test.join(\"test_in_sub2.py\").write(\"def test_2(): pass\")\n\n # Execute from .\n result = testdir.runpytest(\"-v\", \"-s\")\n result.assert_outcomes(passed=2)\n\n # Execute from . 
with one argument \"root\"\n result = testdir.runpytest(\"-v\", \"-s\", \"root\")\n result.assert_outcomes(passed=2)\n\n # Chdir into package's root and execute with no args\n root.chdir()\n result = testdir.runpytest(\"-v\", \"-s\")\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_ordering_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/collect.py_test_package_ordering_", "embedding": null, "metadata": {"file_path": "testing/python/collect.py", "file_name": "collect.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1349, "end_line": 1382, "span_ids": ["test_package_ordering"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_package_ordering(testdir):\n \"\"\"\n .\n \u2514\u2500\u2500 root\n \u251c\u2500\u2500 Test_root.py\n \u251c\u2500\u2500 __init__.py\n \u251c\u2500\u2500 sub1\n \u2502 \u251c\u2500\u2500 Test_sub1.py\n \u2502 \u2514\u2500\u2500 __init__.py\n \u2514\u2500\u2500 sub2\n \u2514\u2500\u2500 test\n \u2514\u2500\u2500 test_sub2.py\n\n \"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n python_files=*.py\n \"\"\"\n )\n root = testdir.mkpydir(\"root\")\n sub1 = root.mkdir(\"sub1\")\n sub1.ensure(\"__init__.py\")\n sub2 = root.mkdir(\"sub2\")\n sub2_test = sub2.mkdir(\"sub2\")\n\n root.join(\"Test_root.py\").write(\"def test_1(): pass\")\n sub1.join(\"Test_sub1.py\").write(\"def test_2(): pass\")\n sub2_test.join(\"test_sub2.py\").write(\"def test_3(): pass\")\n\n # Execute from .\n result = testdir.runpytest(\"-v\", \"-s\")\n result.assert_outcomes(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py__coding_utf_8__test_getfuncargnames.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py__coding_utf_8__test_getfuncargnames.None_5", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 44, "span_ids": ["docstring", "test_getfuncargnames", "test_getfuncargnames.A", "imports", "test_getfuncargnames.A.f"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nimport sys\nimport textwrap\n\nimport pytest\nfrom _pytest import fixtures\nfrom _pytest.fixtures import FixtureLookupError\nfrom _pytest.fixtures import FixtureRequest\nfrom _pytest.pathlib import Path\nfrom _pytest.pytester import get_public_names\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\n\ndef 
test_getfuncargnames():\n def f():\n pass\n\n assert not fixtures.getfuncargnames(f)\n\n def g(arg):\n pass\n\n assert fixtures.getfuncargnames(g) == (\"arg\",)\n\n def h(arg1, arg2=\"hello\"):\n pass\n\n assert fixtures.getfuncargnames(h) == (\"arg1\",)\n\n def h(arg1, arg2, arg3=\"hello\"):\n pass\n\n assert fixtures.getfuncargnames(h) == (\"arg1\", \"arg2\")\n\n class A(object):\n def f(self, arg1, arg2=\"hello\"):\n pass\n\n @staticmethod\n def static(arg1, arg2):\n pass\n\n assert fixtures.getfuncargnames(A().f) == (\"arg1\",)\n assert fixtures.getfuncargnames(A.static, cls=A) == (\"arg1\", \"arg2\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures_TestFillFixtures.test_extend_fixture_conftest_conftest.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures_TestFillFixtures.test_extend_fixture_conftest_conftest.None_1", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 47, "end_line": 115, "span_ids": ["TestFillFixtures.test_funcarg_lookupfails", "TestFillFixtures.test_funcarg_basic", "TestFillFixtures.test_funcarg_lookup_classlevel", "TestFillFixtures.test_conftest_funcargs_only_available_in_subdir", "TestFillFixtures.test_extend_fixture_conftest_module", "TestFillFixtures.test_fillfuncargs_exposed", "TestFillFixtures.test_detect_recursive_dependency_error", "TestFillFixtures.test_extend_fixture_conftest_conftest", "TestFillFixtures", "TestFillFixtures.test_funcarg_lookup_modulelevel", "TestFillFixtures.test_extend_fixture_module_class"], "tokens": 593}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n def test_fillfuncargs_exposed(self):\n # used by oejskit, kept for compatibility\n assert pytest._fillfuncargs == fixtures.fillfixtures\n\n def test_funcarg_lookupfails(self, testdir):\n testdir.copy_example()\n result = testdir.runpytest() # \"--collect-only\")\n assert result.ret != 0\n result.stdout.fnmatch_lines(\n \"\"\"\n *def test_func(some)*\n *fixture*some*not found*\n *xyzsomething*\n \"\"\"\n )\n\n def test_detect_recursive_dependency_error(self, testdir):\n testdir.copy_example()\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*recursive dependency involving fixture 'fix1' detected*\"]\n )\n\n def test_funcarg_basic(self, testdir):\n testdir.copy_example()\n item = testdir.getitem(Path(\"test_funcarg_basic.py\"))\n fixtures.fillfixtures(item)\n del item.funcargs[\"request\"]\n assert len(get_public_names(item.funcargs)) == 2\n assert item.funcargs[\"some\"] == \"test_func\"\n assert item.funcargs[\"other\"] == 42\n\n def test_funcarg_lookup_modulelevel(self, testdir):\n testdir.copy_example()\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)\n\n def test_funcarg_lookup_classlevel(self, testdir):\n p = testdir.copy_example()\n result = testdir.runpytest(p)\n 
result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_conftest_funcargs_only_available_in_subdir(self, testdir):\n testdir.copy_example()\n result = testdir.runpytest(\"-v\")\n result.assert_outcomes(passed=2)\n\n def test_extend_fixture_module_class(self, testdir):\n testfile = testdir.copy_example()\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_extend_fixture_conftest_module(self, testdir):\n p = testdir.copy_example()\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest(next(p.visit(\"test_*.py\")))\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_extend_fixture_conftest_conftest(self, testdir):\n p = testdir.copy_example()\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest(next(p.visit(\"test_*.py\")))\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_conftest_plugin_TestFillFixtures.test_extend_fixture_conftest_plugin.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_conftest_plugin_TestFillFixtures.test_extend_fixture_conftest_plugin.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 117, "end_line": 146, "span_ids": ["TestFillFixtures.test_extend_fixture_conftest_plugin"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_extend_fixture_conftest_plugin(self, testdir):\n testdir.makepyfile(\n testplugin=\"\"\"\n import pytest\n\n @pytest.fixture\n def foo():\n return 7\n \"\"\"\n )\n testdir.syspathinsert()\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n pytest_plugins = 'testplugin'\n\n @pytest.fixture\n def foo(foo):\n return foo + 7\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_foo(foo):\n assert foo == 14\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_plugin_plugin_TestFillFixtures.test_extend_fixture_plugin_plugin.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_extend_fixture_plugin_plugin_TestFillFixtures.test_extend_fixture_plugin_plugin.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", 
"category": "implementation", "start_line": 148, "end_line": 178, "span_ids": ["TestFillFixtures.test_extend_fixture_plugin_plugin"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_extend_fixture_plugin_plugin(self, testdir):\n # Two plugins should extend each order in loading order\n testdir.makepyfile(\n testplugin0=\"\"\"\n import pytest\n\n @pytest.fixture\n def foo():\n return 7\n \"\"\"\n )\n testdir.makepyfile(\n testplugin1=\"\"\"\n import pytest\n\n @pytest.fixture\n def foo(foo):\n return foo + 7\n \"\"\"\n )\n testdir.syspathinsert()\n testdir.makepyfile(\n \"\"\"\n pytest_plugins = ['testplugin0', 'testplugin1']\n\n def test_foo(foo):\n assert foo == 14\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_module_TestFillFixtures.test_override_parametrized_fixture_conftest_module.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_module_TestFillFixtures.test_override_parametrized_fixture_conftest_module.None_2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 180, "end_line": 206, "span_ids": ["TestFillFixtures.test_override_parametrized_fixture_conftest_module"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_override_parametrized_fixture_conftest_module(self, testdir):\n \"\"\"Test override of the parametrized fixture with non-parametrized one on the test module level.\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1, 2, 3])\n def spam(request):\n return request.param\n \"\"\"\n )\n testfile = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def spam():\n return 'spam'\n\n def test_spam(spam):\n assert spam == 'spam'\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_parametrized_fixture_conftest_conftest.None_4", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 208, "end_line": 243, "span_ids": ["TestFillFixtures.test_override_parametrized_fixture_conftest_conftest"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_override_parametrized_fixture_conftest_conftest(self, testdir):\n \"\"\"Test override of the parametrized fixture with non-parametrized one on the conftest level.\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1, 2, 3])\n def spam(request):\n return request.param\n \"\"\"\n )\n subdir = testdir.mkpydir(\"subdir\")\n subdir.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture\n def spam():\n return 'spam'\n \"\"\"\n )\n )\n testfile = subdir.join(\"test_spam.py\")\n testfile.write(\n textwrap.dedent(\n \"\"\"\\\n def test_spam(spam):\n assert spam == \"spam\"\n \"\"\"\n )\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module_TestFillFixtures.test_override_non_parametrized_fixture_conftest_module.None_2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 245, "end_line": 274, "span_ids": ["TestFillFixtures.test_override_non_parametrized_fixture_conftest_module"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_override_non_parametrized_fixture_conftest_module(self, testdir):\n \"\"\"Test override of the non-parametrized fixture with parametrized 
one on the test module level.\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def spam():\n return 'spam'\n \"\"\"\n )\n testfile = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1, 2, 3])\n def spam(request):\n return request.param\n\n params = {'spam': 1}\n\n def test_spam(spam):\n assert spam == params['spam']\n params['spam'] += 1\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*3 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest.None_4", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 276, "end_line": 314, "span_ids": ["TestFillFixtures.test_override_non_parametrized_fixture_conftest_conftest"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):\n \"\"\"Test override of the non-parametrized fixture with parametrized one on the conftest level.\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def spam():\n return 'spam'\n \"\"\"\n )\n subdir = testdir.mkpydir(\"subdir\")\n subdir.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture(params=[1, 2, 3])\n def spam(request):\n return request.param\n \"\"\"\n )\n )\n testfile = subdir.join(\"test_spam.py\")\n testfile.write(\n textwrap.dedent(\n \"\"\"\\\n params = {'spam': 1}\n\n def test_spam(spam):\n assert spam == params['spam']\n params['spam'] += 1\n \"\"\"\n )\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*3 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest.None_4": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest_TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest.None_4", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 316, "end_line": 358, "span_ids": ["TestFillFixtures.test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_override_autouse_fixture_with_parametrized_fixture_conftest_conftest(\n self, testdir\n ):\n \"\"\"Test override of the autouse fixture with parametrized one on the conftest level.\n This test covers the issue explained in issue 1601\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(autouse=True)\n def spam():\n return 'spam'\n \"\"\"\n )\n subdir = testdir.mkpydir(\"subdir\")\n subdir.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture(params=[1, 2, 3])\n def spam(request):\n return request.param\n \"\"\"\n )\n )\n testfile = subdir.join(\"test_spam.py\")\n testfile.write(\n textwrap.dedent(\n \"\"\"\\\n params = {'spam': 1}\n\n def test_spam(spam):\n assert spam == params['spam']\n params['spam'] += 1\n \"\"\"\n )\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*3 passed*\"])\n result = testdir.runpytest(testfile)\n result.stdout.fnmatch_lines([\"*3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_autouse_fixture_plugin_TestFillFixtures.test_autouse_fixture_plugin.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_autouse_fixture_plugin_TestFillFixtures.test_autouse_fixture_plugin.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 360, "end_line": 382, "span_ids": ["TestFillFixtures.test_autouse_fixture_plugin"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_autouse_fixture_plugin(self, testdir):\n # A fixture from a plugin has no baseid set, which screwed up\n # the autouse fixture handling.\n testdir.makepyfile(\n testplugin=\"\"\"\n import pytest\n\n @pytest.fixture(autouse=True)\n def foo(request):\n request.function.foo = 7\n 
\"\"\"\n )\n testdir.syspathinsert()\n testdir.makepyfile(\n \"\"\"\n pytest_plugins = 'testplugin'\n\n def test_foo(request):\n assert request.function.foo == 7\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_funcarg_lookup_error_TestFillFixtures.test_funcarg_lookup_error.assert_INTERNAL_not_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_funcarg_lookup_error_TestFillFixtures.test_funcarg_lookup_error.assert_INTERNAL_not_in_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 384, "end_line": 419, "span_ids": ["TestFillFixtures.test_funcarg_lookup_error"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def test_funcarg_lookup_error(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def a_fixture(): pass\n\n @pytest.fixture\n def b_fixture(): pass\n\n @pytest.fixture\n def c_fixture(): pass\n\n @pytest.fixture\n def d_fixture(): pass\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_lookup_error(unknown):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ERROR at setup of test_lookup_error*\",\n \" def test_lookup_error(unknown):*\",\n \"E fixture 'unknown' not found\",\n \"> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*\", # sorted\n \"> use 'py*test --fixtures *' for help on them.\",\n \"*1 error*\",\n ]\n )\n assert \"INTERNAL\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_fixture_excinfo_leak_TestFillFixtures.test_fixture_excinfo_leak.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFillFixtures.test_fixture_excinfo_leak_TestFillFixtures.test_fixture_excinfo_leak.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 421, "end_line": 443, "span_ids": ["TestFillFixtures.test_fixture_excinfo_leak"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.pytester_example_path(\"fixtures/fill_fixtures\")\nclass TestFillFixtures(object):\n\n def 
test_fixture_excinfo_leak(self, testdir):\n # on python2 sys.excinfo would leak into fixture executions\n testdir.makepyfile(\n \"\"\"\n import sys\n import traceback\n import pytest\n\n @pytest.fixture\n def leak():\n if sys.exc_info()[0]: # python3 bug :)\n traceback.print_exc()\n #fails\n assert sys.exc_info() == (None, None, None)\n\n def test_leak(leak):\n if sys.exc_info()[0]: # python3 bug :)\n traceback.print_exc()\n assert sys.exc_info() == (None, None, None)\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic_TestRequestBasic.test_request_attributes_method.assert_req_instance___cla": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic_TestRequestBasic.test_request_attributes_method.assert_req_instance___cla", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 446, "end_line": 481, "span_ids": ["TestRequestBasic.test_request_attributes", "TestRequestBasic", "TestRequestBasic.test_request_attributes_method"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n def test_request_attributes(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def something(request): pass\n def test_func(something): pass\n \"\"\"\n )\n req = fixtures.FixtureRequest(item)\n assert req.function == item.obj\n assert req.keywords == item.keywords\n assert hasattr(req.module, \"test_func\")\n assert req.cls is None\n assert req.function.__name__ == \"test_func\"\n assert req.config == item.config\n assert repr(req).find(req.function.__name__) != -1\n\n def test_request_attributes_method(self, testdir):\n item, = testdir.getitems(\n \"\"\"\n import pytest\n class TestB(object):\n\n @pytest.fixture\n def something(self, request):\n return 1\n def test_func(self, something):\n pass\n \"\"\"\n )\n req = item._request\n assert req.cls.__name__ == \"TestB\"\n assert req.instance.__class__ == req.cls", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs.assert_arg2fixturedefs_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs_TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs.assert_arg2fixturedefs_s", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 483, "end_line": 499, "span_ids": 
["TestRequestBasic.test_request_contains_funcarg_arg2fixturedefs"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_contains_funcarg_arg2fixturedefs(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n import pytest\n @pytest.fixture\n def something(request):\n pass\n class TestClass(object):\n def test_method(self, something):\n pass\n \"\"\"\n )\n item1, = testdir.genitems([modcol])\n assert item1.name == \"test_method\"\n arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs\n assert len(arg2fixturedefs) == 1\n assert arg2fixturedefs[\"something\"][0].argname == \"something\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_garbage_TestRequestBasic.test_request_garbage.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_garbage_TestRequestBasic.test_request_garbage.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 501, "end_line": 539, "span_ids": ["TestRequestBasic.test_request_garbage"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n @pytest.mark.skipif(\n hasattr(sys, \"pypy_version_info\"),\n reason=\"this method of test doesn't work on pypy\",\n )\n def test_request_garbage(self, testdir):\n try:\n import xdist # noqa\n except ImportError:\n pass\n else:\n pytest.xfail(\"this test is flaky when executed with xdist\")\n testdir.makepyfile(\n \"\"\"\n import sys\n import pytest\n from _pytest.fixtures import PseudoFixtureDef\n import gc\n\n @pytest.fixture(autouse=True)\n def something(request):\n original = gc.get_debug()\n gc.set_debug(gc.DEBUG_SAVEALL)\n gc.collect()\n\n yield\n\n try:\n gc.collect()\n leaked = [x for _ in gc.garbage if isinstance(_, PseudoFixtureDef)]\n assert leaked == []\n finally:\n gc.set_debug(original)\n\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_recursive_TestRequestBasic.test_getfixturevalue_recursive.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_recursive_TestRequestBasic.test_getfixturevalue_recursive.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 541, "end_line": 563, "span_ids": ["TestRequestBasic.test_getfixturevalue_recursive"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_getfixturevalue_recursive(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def something(request):\n return 1\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def something(request):\n return request.getfixturevalue(\"something\") + 1\n def test_func(something):\n assert something == 2\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_teardown_TestRequestBasic.test_getfixturevalue_teardown.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_teardown_TestRequestBasic.test_getfixturevalue_teardown.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 565, "end_line": 601, "span_ids": ["TestRequestBasic.test_getfixturevalue_teardown"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_getfixturevalue_teardown(self, testdir):\n \"\"\"\n Issue #1895\n\n `test_inner` requests `inner` fixture, which in turn requests `resource`\n using `getfixturevalue`. 
`test_func` then requests `resource`.\n\n `resource` is teardown before `inner` because the fixture mechanism won't consider\n `inner` dependent on `resource` when it is used via `getfixturevalue`: `test_func`\n will then cause the `resource`'s finalizer to be called first because of this.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session')\n def resource():\n r = ['value']\n yield r\n r.pop()\n\n @pytest.fixture(scope='session')\n def inner(request):\n resource = request.getfixturevalue('resource')\n assert resource == ['value']\n yield\n assert resource == ['value']\n\n def test_inner(inner):\n pass\n\n def test_func(resource):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_TestRequestBasic.test_getfixturevalue.with_warning_expectation_.assert_request_in_item_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_getfixturevalue_TestRequestBasic.test_getfixturevalue.with_warning_expectation_.assert_request_in_item_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 603, "end_line": 645, "span_ids": ["TestRequestBasic.test_getfixturevalue"], "tokens": 336}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n @pytest.mark.parametrize(\"getfixmethod\", (\"getfixturevalue\", \"getfuncargvalue\"))\n def test_getfixturevalue(self, testdir, getfixmethod):\n item = testdir.getitem(\n \"\"\"\n import pytest\n values = [2]\n @pytest.fixture\n def something(request): return 1\n @pytest.fixture\n def other(request):\n return values.pop()\n def test_func(something): pass\n \"\"\"\n )\n import contextlib\n\n if getfixmethod == \"getfuncargvalue\":\n warning_expectation = pytest.warns(DeprecationWarning)\n else:\n # see #1830 for a cleaner way to accomplish this\n @contextlib.contextmanager\n def expecting_no_warning():\n yield\n\n warning_expectation = expecting_no_warning()\n\n req = item._request\n with warning_expectation:\n fixture_fetcher = getattr(req, getfixmethod)\n with pytest.raises(FixtureLookupError):\n fixture_fetcher(\"notexists\")\n val = fixture_fetcher(\"something\")\n assert val == 1\n val = fixture_fetcher(\"something\")\n assert val == 1\n val2 = fixture_fetcher(\"other\")\n assert val2 == 2\n val2 = fixture_fetcher(\"other\") # see about caching\n assert val2 == 2\n pytest._fillfuncargs(item)\n assert item.funcargs[\"something\"] == 1\n assert len(get_public_names(item.funcargs)) == 2\n assert \"request\" in item.funcargs", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_TestRequestBasic.test_request_addfinalizer.assert_teardownlist_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_TestRequestBasic.test_request_addfinalizer.assert_teardownlist_1", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 647, "end_line": 666, "span_ids": ["TestRequestBasic.test_request_addfinalizer"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_addfinalizer(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n teardownlist = []\n @pytest.fixture\n def something(request):\n request.addfinalizer(lambda: teardownlist.append(1))\n def test_func(something): pass\n \"\"\"\n )\n item.session._setupstate.prepare(item)\n pytest._fillfuncargs(item)\n # successively check finalization calls\n teardownlist = item.getparent(pytest.Module).obj.teardownlist\n ss = item.session._setupstate\n assert not teardownlist\n ss.teardown_exact(item, None)\n print(ss.stack)\n assert teardownlist == [1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_TestRequestBasic.test_request_addfinalizer_failing_setup.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_TestRequestBasic.test_request_addfinalizer_failing_setup.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 668, "end_line": 684, "span_ids": ["TestRequestBasic.test_request_addfinalizer_failing_setup"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_addfinalizer_failing_setup(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = [1]\n @pytest.fixture\n def myfix(request):\n request.addfinalizer(values.pop)\n assert 0\n def test_fix(myfix):\n pass\n def test_finalizer_ran():\n assert not values\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(failed=1, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_module_TestRequestBasic.test_request_addfinalizer_failing_setup_module.assert_not_mod_values": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_failing_setup_module_TestRequestBasic.test_request_addfinalizer_failing_setup_module.assert_not_mod_values", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 686, "end_line": 702, "span_ids": ["TestRequestBasic.test_request_addfinalizer_failing_setup_module"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_addfinalizer_failing_setup_module(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = [1, 2]\n @pytest.fixture(scope=\"module\")\n def myfix(request):\n request.addfinalizer(values.pop)\n request.addfinalizer(values.pop)\n assert 0\n def test_fix(myfix):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n mod = reprec.getcalls(\"pytest_runtest_setup\")[0].item.module\n assert not mod.values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_partial_setup_failure_TestRequestBasic.test_request_addfinalizer_partial_setup_failure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_addfinalizer_partial_setup_failure_TestRequestBasic.test_request_addfinalizer_partial_setup_failure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 704, "end_line": 721, "span_ids": ["TestRequestBasic.test_request_addfinalizer_partial_setup_failure"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_addfinalizer_partial_setup_failure(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture\n def something(request):\n request.addfinalizer(lambda: values.append(None))\n def test_func(something, missingarg):\n pass\n def test_second():\n assert len(values) == 1\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\"*1 error*\"] # XXX the whole module collection fails\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions_TestRequestBasic.test_request_subrequest_addfinalizer_exceptions.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 723, "end_line": 755, "span_ids": ["TestRequestBasic.test_request_subrequest_addfinalizer_exceptions"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_subrequest_addfinalizer_exceptions(self, testdir):\n \"\"\"\n Ensure exceptions raised during teardown by a finalizer are suppressed\n until all finalizers are called, re-raising the first exception (#2440)\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n def _excepts(where):\n raise Exception('Error in %s fixture' % where)\n @pytest.fixture\n def subrequest(request):\n return request\n @pytest.fixture\n def something(subrequest):\n subrequest.addfinalizer(lambda: values.append(1))\n subrequest.addfinalizer(lambda: values.append(2))\n subrequest.addfinalizer(lambda: _excepts('something'))\n @pytest.fixture\n def excepts(subrequest):\n subrequest.addfinalizer(lambda: _excepts('excepts'))\n subrequest.addfinalizer(lambda: values.append(3))\n def test_first(something, excepts):\n pass\n def test_second():\n assert values == [3, 2, 1]\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*Exception: Error in excepts fixture\", \"* 2 passed, 1 error in *\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_getmodulepath_TestRequestBasic.test_request_fixturenames.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_getmodulepath_TestRequestBasic.test_request_fixturenames.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 757, "end_line": 784, "span_ids": ["TestRequestBasic.test_request_getmodulepath", "TestRequestBasic.test_request_fixturenames"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_getmodulepath(self, testdir):\n modcol = testdir.getmodulecol(\"def test_somefunc(): pass\")\n item, = 
testdir.genitems([modcol])\n req = fixtures.FixtureRequest(item)\n assert req.fspath == modcol.fspath\n\n def test_request_fixturenames(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n from _pytest.pytester import get_public_names\n @pytest.fixture()\n def arg1():\n pass\n @pytest.fixture()\n def farg(arg1):\n pass\n @pytest.fixture(autouse=True)\n def sarg(tmpdir):\n pass\n def test_function(request, farg):\n assert set(get_public_names(request.fixturenames)) == \\\n set([\"tmpdir\", \"sarg\", \"arg1\", \"request\", \"farg\",\n \"tmp_path\", \"tmp_path_factory\"])\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_fixturenames_dynamic_fixture_TestRequestBasic.test_funcargnames_compatattr.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_request_fixturenames_dynamic_fixture_TestRequestBasic.test_funcargnames_compatattr.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 786, "end_line": 809, "span_ids": ["TestRequestBasic.test_request_fixturenames_dynamic_fixture", "TestRequestBasic.test_funcargnames_compatattr"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_request_fixturenames_dynamic_fixture(self, testdir):\n \"\"\"Regression test for #3057\"\"\"\n testdir.copy_example(\"fixtures/test_getfixturevalue_dynamic.py\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n def test_funcargnames_compatattr(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n assert metafunc.funcargnames == metafunc.fixturenames\n @pytest.fixture\n def fn(request):\n assert request._pyfuncitem.funcargnames == \\\n request._pyfuncitem.fixturenames\n return request.funcargnames, request.fixturenames\n\n def test_hello(fn):\n assert fn[0] == fn[1]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupdecorator_and_xunit_TestRequestBasic.test_setupdecorator_and_xunit.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupdecorator_and_xunit_TestRequestBasic.test_setupdecorator_and_xunit.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 811, "end_line": 841, "span_ids": 
["TestRequestBasic.test_setupdecorator_and_xunit"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_setupdecorator_and_xunit(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope='module', autouse=True)\n def setup_module():\n values.append(\"module\")\n @pytest.fixture(autouse=True)\n def setup_function():\n values.append(\"function\")\n\n def test_func():\n pass\n\n class TestClass(object):\n @pytest.fixture(scope=\"class\", autouse=True)\n def setup_class(self):\n values.append(\"class\")\n @pytest.fixture(autouse=True)\n def setup_method(self):\n values.append(\"method\")\n def test_method(self):\n pass\n def test_all():\n assert values == [\"module\", \"function\", \"class\",\n \"function\", \"method\", \"function\"]\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep_TestRequestBasic.test_fixtures_sub_subdir_normalize_sep.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 843, "end_line": 865, "span_ids": ["TestRequestBasic.test_fixtures_sub_subdir_normalize_sep"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_fixtures_sub_subdir_normalize_sep(self, testdir):\n # this tests that normalization of nodeids takes place\n b = testdir.mkdir(\"tests\").mkdir(\"unit\")\n b.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def arg1():\n pass\n \"\"\"\n )\n )\n p = b.join(\"test_module.py\")\n p.write(\"def test_func(arg1): pass\")\n result = testdir.runpytest(p, \"--fixtures\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n \"\"\"\n *fixtures defined*conftest*\n *arg1*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_show_fixtures_color_yes_TestRequestBasic.test_newstyle_with_request.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_show_fixtures_color_yes_TestRequestBasic.test_newstyle_with_request.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 867, "end_line": 884, "span_ids": ["TestRequestBasic.test_show_fixtures_color_yes", "TestRequestBasic.test_newstyle_with_request"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_show_fixtures_color_yes(self, testdir):\n testdir.makepyfile(\"def test_this(): assert 1\")\n result = testdir.runpytest(\"--color=yes\", \"--fixtures\")\n assert \"\\x1b[32mtmpdir\" in result.stdout.str()\n\n def test_newstyle_with_request(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def arg(request):\n pass\n def test_1(arg):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupcontext_no_param_TestRequestBasic.test_setupcontext_no_param.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestBasic.test_setupcontext_no_param_TestRequestBasic.test_setupcontext_no_param.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 886, "end_line": 902, "span_ids": ["TestRequestBasic.test_setupcontext_no_param"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestBasic(object):\n\n def test_setupcontext_no_param(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(params=[1,2])\n def arg(request):\n return request.param\n\n @pytest.fixture(autouse=True)\n def mysetup(request, arg):\n assert not hasattr(request, \"param\")\n def test_1(arg):\n assert arg in (1,2)\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking_TestRequestMarking.test_accesskeywords.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking_TestRequestMarking.test_accesskeywords.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": 
"fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 905, "end_line": 945, "span_ids": ["TestRequestMarking.test_accesskeywords", "TestRequestMarking.test_applymarker", "TestRequestMarking"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestMarking(object):\n def test_applymarker(self, testdir):\n item1, item2 = testdir.getitems(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def something(request):\n pass\n class TestClass(object):\n def test_func1(self, something):\n pass\n def test_func2(self, something):\n pass\n \"\"\"\n )\n req1 = fixtures.FixtureRequest(item1)\n assert \"xfail\" not in item1.keywords\n req1.applymarker(pytest.mark.xfail)\n assert \"xfail\" in item1.keywords\n assert \"skipif\" not in item1.keywords\n req1.applymarker(pytest.mark.skipif)\n assert \"skipif\" in item1.keywords\n with pytest.raises(ValueError):\n req1.applymarker(42)\n\n def test_accesskeywords(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def keywords(request):\n return request.keywords\n @pytest.mark.XYZ\n def test_function(keywords):\n assert keywords[\"XYZ\"]\n assert \"abc\" not in keywords\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking.test_accessmarker_dynamic_TestRequestMarking.test_accessmarker_dynamic.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestMarking.test_accessmarker_dynamic_TestRequestMarking.test_accessmarker_dynamic.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 947, "end_line": 972, "span_ids": ["TestRequestMarking.test_accessmarker_dynamic"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestMarking(object):\n\n def test_accessmarker_dynamic(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture()\n def keywords(request):\n return request.keywords\n\n @pytest.fixture(scope=\"class\", autouse=True)\n def marking(request):\n request.applymarker(pytest.mark.XYZ(\"hello\"))\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_fun1(keywords):\n assert keywords[\"XYZ\"] is not None\n assert \"abc\" not in keywords\n def test_fun2(keywords):\n assert keywords[\"XYZ\"] is not None\n assert \"abc\" not in keywords\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages_TestFixtureUsages.test_receives_funcargs.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages_TestFixtureUsages.test_receives_funcargs.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 975, "end_line": 1011, "span_ids": ["TestFixtureUsages.test_receives_funcargs", "TestFixtureUsages", "TestFixtureUsages.test_noargfixturedec"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n def test_noargfixturedec(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg1():\n return 1\n\n def test_func(arg1):\n assert arg1 == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n def test_receives_funcargs(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def arg1():\n return 1\n\n @pytest.fixture()\n def arg2(arg1):\n return arg1 + 1\n\n def test_add(arg2):\n assert arg2 == 2\n def test_all(arg1, arg2):\n assert arg1 == 1\n assert arg2 == 2\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_TestFixtureUsages.test_receives_funcargs_scope_mismatch.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_TestFixtureUsages.test_receives_funcargs_scope_mismatch.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1013, "end_line": 1037, "span_ids": ["TestFixtureUsages.test_receives_funcargs_scope_mismatch"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_receives_funcargs_scope_mismatch(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"function\")\n def arg1():\n return 1\n\n @pytest.fixture(scope=\"module\")\n def arg2(arg1):\n return arg1 + 1\n\n def test_add(arg2):\n assert arg2 == 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ScopeMismatch*involved factories*\",\n \"test_receives_funcargs_scope_mismatch.py:6: def arg2(arg1)\",\n \"test_receives_funcargs_scope_mismatch.py:2: def arg1()\",\n \"*1 error*\",\n ]\n 
)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_issue660_TestFixtureUsages.test_invalid_scope.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_receives_funcargs_scope_mismatch_issue660_TestFixtureUsages.test_invalid_scope.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1039, "end_line": 1075, "span_ids": ["TestFixtureUsages.test_invalid_scope", "TestFixtureUsages.test_receives_funcargs_scope_mismatch_issue660"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_receives_funcargs_scope_mismatch_issue660(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"function\")\n def arg1():\n return 1\n\n @pytest.fixture(scope=\"module\")\n def arg2(arg1):\n return arg1 + 1\n\n def test_add(arg1, arg2):\n assert arg2 == 2\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*ScopeMismatch*involved factories*\", \"* def arg2*\", \"*1 error*\"]\n )\n\n def test_invalid_scope(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"functions\")\n def badscope():\n pass\n\n def test_nothing(badscope):\n pass\n \"\"\"\n )\n result = testdir.runpytest_inprocess()\n result.stdout.fnmatch_lines(\n \"*Fixture 'badscope' from test_invalid_scope.py got an unexpected scope value 'functions'\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_funcarg_parametrized_and_used_twice_TestFixtureUsages.test_funcarg_parametrized_and_used_twice.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_funcarg_parametrized_and_used_twice_TestFixtureUsages.test_funcarg_parametrized_and_used_twice.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1077, "end_line": 1097, "span_ids": ["TestFixtureUsages.test_funcarg_parametrized_and_used_twice"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_funcarg_parametrized_and_used_twice(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = 
[]\n @pytest.fixture(params=[1,2])\n def arg1(request):\n values.append(1)\n return request.param\n\n @pytest.fixture()\n def arg2(arg1):\n return arg1 + 1\n\n def test_add(arg1, arg2):\n assert arg2 == arg1 + 1\n assert len(values) == arg1\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error_TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1099, "end_line": 1125, "span_ids": ["TestFixtureUsages.test_factory_uses_unknown_funcarg_as_dependency_error"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_factory_uses_unknown_funcarg_as_dependency_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture()\n def fail(missing):\n return\n\n @pytest.fixture()\n def call_fail(fail):\n return\n\n def test_missing(call_fail):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *pytest.fixture()*\n *def call_fail(fail)*\n *pytest.fixture()*\n *def fail*\n *fixture*'missing'*not found*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_setup_as_classes_fails_TestFixtureUsages.test_request_can_be_overridden.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_factory_setup_as_classes_fails_TestFixtureUsages.test_request_can_be_overridden.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1127, "end_line": 1156, "span_ids": ["TestFixtureUsages.test_factory_setup_as_classes_fails", "TestFixtureUsages.test_request_can_be_overridden"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_factory_setup_as_classes_fails(self, testdir):\n 
testdir.makepyfile(\n \"\"\"\n import pytest\n class arg1(object):\n def __init__(self, request):\n self.x = 1\n arg1 = pytest.fixture()(arg1)\n\n \"\"\"\n )\n reprec = testdir.inline_run()\n values = reprec.getfailedcollections()\n assert len(values) == 1\n\n @pytest.mark.filterwarnings(\"ignore::pytest.PytestDeprecationWarning\")\n def test_request_can_be_overridden(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def request(request):\n request.a = 1\n return request\n def test_request(request):\n assert request.a == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_marker_TestFixtureUsages.test_usefixtures_marker.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_marker_TestFixtureUsages.test_usefixtures_marker.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1158, "end_line": 1181, "span_ids": ["TestFixtureUsages.test_usefixtures_marker"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_usefixtures_marker(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n values = []\n\n @pytest.fixture(scope=\"class\")\n def myfix(request):\n request.cls.hello = \"world\"\n values.append(1)\n\n class TestClass(object):\n def test_one(self):\n assert self.hello == \"world\"\n assert len(values) == 1\n def test_two(self):\n assert self.hello == \"world\"\n assert len(values) == 1\n pytest.mark.usefixtures(\"myfix\")(TestClass)\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_ini_TestFixtureUsages.test_usefixtures_ini.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_ini_TestFixtureUsages.test_usefixtures_ini.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1183, "end_line": 1210, "span_ids": ["TestFixtureUsages.test_usefixtures_ini"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
TestFixtureUsages(object):\n\n def test_usefixtures_ini(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n usefixtures = myfix\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"class\")\n def myfix(request):\n request.cls.hello = \"world\"\n\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n class TestClass(object):\n def test_one(self):\n assert self.hello == \"world\"\n def test_two(self):\n assert self.hello == \"world\"\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_seen_in_showmarkers_TestFixtureUsages.test_request_instance_issue203.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_usefixtures_seen_in_showmarkers_TestFixtureUsages.test_request_instance_issue203.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1212, "end_line": 1235, "span_ids": ["TestFixtureUsages.test_usefixtures_seen_in_showmarkers", "TestFixtureUsages.test_request_instance_issue203"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_usefixtures_seen_in_showmarkers(self, testdir):\n result = testdir.runpytest(\"--markers\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *usefixtures(fixturename1*mark tests*fixtures*\n \"\"\"\n )\n\n def test_request_instance_issue203(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestClass(object):\n @pytest.fixture\n def setup1(self, request):\n assert self == request.instance\n self.arg1 = 1\n def test_hello(self, setup1):\n assert self.arg1 == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_fixture_parametrized_with_iterator_TestFixtureUsages.test_fixture_parametrized_with_iterator.assert_values_1_2_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_fixture_parametrized_with_iterator_TestFixtureUsages.test_fixture_parametrized_with_iterator.assert_values_1_2_1", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1237, "end_line": 1264, "span_ids": ["TestFixtureUsages.test_fixture_parametrized_with_iterator"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_fixture_parametrized_with_iterator(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n values = []\n def f():\n yield 1\n yield 2\n dec = pytest.fixture(scope=\"module\", params=f())\n\n @dec\n def arg(request):\n return request.param\n @dec\n def arg2(request):\n return request.param\n\n def test_1(arg):\n values.append(arg)\n def test_2(arg2):\n values.append(arg2*10)\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=4)\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n assert values == [1, 2, 10, 20]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_setup_functions_as_fixtures_TestFixtureUsages.test_setup_functions_as_fixtures.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureUsages.test_setup_functions_as_fixtures_TestFixtureUsages.test_setup_functions_as_fixtures.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1266, "end_line": 1303, "span_ids": ["TestFixtureUsages.test_setup_functions_as_fixtures"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureUsages(object):\n\n def test_setup_functions_as_fixtures(self, testdir):\n \"\"\"Ensure setup_* methods obey fixture scope rules (#517, #3094).\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n DB_INITIALIZED = None\n\n @pytest.yield_fixture(scope=\"session\", autouse=True)\n def db():\n global DB_INITIALIZED\n DB_INITIALIZED = True\n yield\n DB_INITIALIZED = False\n\n def setup_module():\n assert DB_INITIALIZED\n\n def teardown_module():\n assert DB_INITIALIZED\n\n class TestClass(object):\n\n def setup_method(self, method):\n assert DB_INITIALIZED\n\n def teardown_method(self, method):\n assert DB_INITIALIZED\n\n def test_printer_1(self):\n pass\n\n def test_printer_2(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories_TestFixtureManagerParseFactories.test_parsefactories_evil_objects_issue214.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories_TestFixtureManagerParseFactories.test_parsefactories_evil_objects_issue214.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 1306, "end_line": 1343, "span_ids": ["TestFixtureManagerParseFactories.testdir", "TestFixtureManagerParseFactories.test_parsefactories_evil_objects_issue214", "TestFixtureManagerParseFactories"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n @pytest.fixture\n def testdir(self, request):\n testdir = request.getfixturevalue(\"testdir\")\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def hello(request):\n return \"conftest\"\n\n @pytest.fixture\n def fm(request):\n return request._fixturemanager\n\n @pytest.fixture\n def item(request):\n return request._pyfuncitem\n \"\"\"\n )\n return testdir\n\n def test_parsefactories_evil_objects_issue214(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class A(object):\n def __call__(self):\n pass\n def __getattr__(self, name):\n raise RuntimeError()\n a = A()\n def test_hello():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1, failed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_TestFixtureManagerParseFactories.test_parsefactories_conftest.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_TestFixtureManagerParseFactories.test_parsefactories_conftest.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1345, "end_line": 1357, "span_ids": ["TestFixtureManagerParseFactories.test_parsefactories_conftest"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n\n def test_parsefactories_conftest(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello(item, fm):\n for name in (\"fm\", \"hello\", \"item\"):\n faclist = fm.getfixturedefs(name, item.nodeid)\n assert len(faclist) == 1\n fac = faclist[0]\n assert fac.func.__name__ == name\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class_TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1359, "end_line": 1383, "span_ids": ["TestFixtureManagerParseFactories.test_parsefactories_conftest_and_module_and_class"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n\n def test_parsefactories_conftest_and_module_and_class(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n import six\n\n @pytest.fixture\n def hello(request):\n return \"module\"\n class TestClass(object):\n @pytest.fixture\n def hello(self, request):\n return \"class\"\n def test_hello(self, item, fm):\n faclist = fm.getfixturedefs(\"hello\", item.nodeid)\n print(faclist)\n assert len(faclist) == 3\n\n assert faclist[0].func(item._request) == \"conftest\"\n assert faclist[1].func(item._request) == \"module\"\n assert faclist[2].func(item._request) == \"class\"\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids.with_runner_as_cwd_.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids_TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids.with_runner_as_cwd_.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1385, "end_line": 1432, "span_ids": ["TestFixtureManagerParseFactories.test_parsefactories_relative_node_ids"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n\n def test_parsefactories_relative_node_ids(self, testdir):\n # example mostly taken from:\n # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html\n runner = testdir.mkdir(\"runner\")\n package = testdir.mkdir(\"package\")\n package.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def one():\n return 1\n \"\"\"\n )\n )\n package.join(\"test_x.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_x(one):\n assert one == 1\n \"\"\"\n )\n )\n sub = 
package.mkdir(\"sub\")\n sub.join(\"__init__.py\").ensure()\n sub.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def one():\n return 2\n \"\"\"\n )\n )\n sub.join(\"test_y.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_x(one):\n assert one == 2\n \"\"\"\n )\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)\n with runner.as_cwd():\n reprec = testdir.inline_run(\"..\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_xunit_fixture_TestFixtureManagerParseFactories.test_package_xunit_fixture.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_xunit_fixture_TestFixtureManagerParseFactories.test_package_xunit_fixture.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1434, "end_line": 1483, "span_ids": ["TestFixtureManagerParseFactories.test_package_xunit_fixture"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n\n def test_package_xunit_fixture(self, testdir):\n testdir.makepyfile(\n __init__=\"\"\"\\\n values = []\n \"\"\"\n )\n package = testdir.mkdir(\"package\")\n package.join(\"__init__.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. import values\n def setup_module():\n values.append(\"package\")\n def teardown_module():\n values[:] = []\n \"\"\"\n )\n )\n package.join(\"test_x.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. import values\n def test_x():\n assert values == [\"package\"]\n \"\"\"\n )\n )\n package = testdir.mkdir(\"package2\")\n package.join(\"__init__.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. import values\n def setup_module():\n values.append(\"package2\")\n def teardown_module():\n values[:] = []\n \"\"\"\n )\n )\n package.join(\"test_x.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. 
import values\n def test_x():\n assert values == [\"package2\"]\n \"\"\"\n )\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_fixture_complex_TestFixtureManagerParseFactories.test_collect_custom_items.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureManagerParseFactories.test_package_fixture_complex_TestFixtureManagerParseFactories.test_collect_custom_items.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1485, "end_line": 1529, "span_ids": ["TestFixtureManagerParseFactories.test_collect_custom_items", "TestFixtureManagerParseFactories.test_package_fixture_complex"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureManagerParseFactories(object):\n\n def test_package_fixture_complex(self, testdir):\n testdir.makepyfile(\n __init__=\"\"\"\\\n values = []\n \"\"\"\n )\n testdir.syspathinsert(testdir.tmpdir.dirname)\n package = testdir.mkdir(\"package\")\n package.join(\"__init__.py\").write(\"\")\n package.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n from .. import values\n @pytest.fixture(scope=\"package\")\n def one():\n values.append(\"package\")\n yield values\n values.pop()\n @pytest.fixture(scope=\"package\", autouse=True)\n def two():\n values.append(\"package-auto\")\n yield values\n values.pop()\n \"\"\"\n )\n )\n package.join(\"test_x.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. 
import values\n def test_package_autouse():\n assert values == [\"package-auto\"]\n def test_package(one):\n assert values == [\"package-auto\", \"package\"]\n \"\"\"\n )\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)\n\n def test_collect_custom_items(self, testdir):\n testdir.copy_example(\"fixtures/custom_item\")\n result = testdir.runpytest(\"foo\")\n result.stdout.fnmatch_lines([\"*passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery_TestAutouseDiscovery.testdir.return.testdir": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery_TestAutouseDiscovery.testdir.return.testdir", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1532, "end_line": 1558, "span_ids": ["TestAutouseDiscovery", "TestAutouseDiscovery.testdir"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n @pytest.fixture\n def testdir(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture(autouse=True)\n def perfunction(request, tmpdir):\n pass\n\n @pytest.fixture()\n def arg1(tmpdir):\n pass\n @pytest.fixture(autouse=True)\n def perfunction2(arg1):\n pass\n\n @pytest.fixture\n def fm(request):\n return request._fixturemanager\n\n @pytest.fixture\n def item(request):\n return request._pyfuncitem\n \"\"\"\n )\n return testdir", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_parsefactories_conftest_TestAutouseDiscovery.test_parsefactories_conftest.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_parsefactories_conftest_TestAutouseDiscovery.test_parsefactories_conftest.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1560, "end_line": 1572, "span_ids": ["TestAutouseDiscovery.test_parsefactories_conftest"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_parsefactories_conftest(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from _pytest.pytester import get_public_names\n def test_check_setup(item, fm):\n autousenames = fm._getautousenames(item.nodeid)\n assert len(get_public_names(autousenames)) == 2\n assert \"perfunction2\" in 
autousenames\n assert \"perfunction\" in autousenames\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_two_classes_separated_autouse_TestAutouseDiscovery.test_two_classes_separated_autouse.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_two_classes_separated_autouse_TestAutouseDiscovery.test_two_classes_separated_autouse.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1574, "end_line": 1595, "span_ids": ["TestAutouseDiscovery.test_two_classes_separated_autouse"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_two_classes_separated_autouse(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n class TestA(object):\n values = []\n @pytest.fixture(autouse=True)\n def setup1(self):\n self.values.append(1)\n def test_setup1(self):\n assert self.values == [1]\n class TestB(object):\n values = []\n @pytest.fixture(autouse=True)\n def setup2(self):\n self.values.append(1)\n def test_setup2(self):\n assert self.values == [1]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_at_classlevel_TestAutouseDiscovery.test_setup_at_classlevel.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_at_classlevel_TestAutouseDiscovery.test_setup_at_classlevel.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1597, "end_line": 1612, "span_ids": ["TestAutouseDiscovery.test_setup_at_classlevel"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_setup_at_classlevel(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n class TestClass(object):\n @pytest.fixture(autouse=True)\n def permethod(self, request):\n request.instance.funcname = request.function.__name__\n def test_method1(self):\n assert self.funcname == \"test_method1\"\n def test_method2(self):\n assert self.funcname == \"test_method2\"\n \"\"\"\n )\n 
reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_enabled_functionnode_TestAutouseDiscovery.test_setup_enabled_functionnode.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_setup_enabled_functionnode_TestAutouseDiscovery.test_setup_enabled_functionnode.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1614, "end_line": 1640, "span_ids": ["TestAutouseDiscovery.test_setup_enabled_functionnode"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n @pytest.mark.xfail(reason=\"'enabled' feature not implemented\")\n def test_setup_enabled_functionnode(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def enabled(parentnode, markers):\n return \"needsdb\" in markers\n\n @pytest.fixture(params=[1,2])\n def db(request):\n return request.param\n\n @pytest.fixture(enabled=enabled, autouse=True)\n def createdb(db):\n pass\n\n def test_func1(request):\n assert \"db\" not in request.fixturenames\n\n @pytest.mark.needsdb\n def test_func2(request):\n assert \"db\" in request.fixturenames\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_callables_nocode_TestAutouseDiscovery.test_callables_nocode.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_callables_nocode_TestAutouseDiscovery.test_callables_nocode.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1642, "end_line": 1659, "span_ids": ["TestAutouseDiscovery.test_callables_nocode"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_callables_nocode(self, testdir):\n \"\"\"\n an imported mock.call would break setup/factory discovery\n due to it being callable and __code__ not being a code object\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n class _call(tuple):\n def __call__(self, *k, **kw):\n pass\n def __getattr__(self, k):\n return self\n\n call = _call()\n \"\"\"\n )\n reprec = 
testdir.inline_run(\"-s\")\n reprec.assertoutcome(failed=0, passed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_conftests_TestAutouseDiscovery.test_autouse_in_conftests.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_conftests_TestAutouseDiscovery.test_autouse_in_conftests.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1661, "end_line": 1680, "span_ids": ["TestAutouseDiscovery.test_autouse_in_conftests"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_autouse_in_conftests(self, testdir):\n a = testdir.mkdir(\"a\")\n b = testdir.mkdir(\"a1\")\n conftest = testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture(autouse=True)\n def hello():\n xxx\n \"\"\"\n )\n conftest.move(a.join(conftest.basename))\n a.join(\"test_something.py\").write(\"def test_func(): pass\")\n b.join(\"test_otherthing.py\").write(\"def test_func(): pass\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *1 passed*1 error*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_module_and_two_classes_TestAutouseDiscovery.test_autouse_in_module_and_two_classes.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseDiscovery.test_autouse_in_module_and_two_classes_TestAutouseDiscovery.test_autouse_in_module_and_two_classes.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1682, "end_line": 1705, "span_ids": ["TestAutouseDiscovery.test_autouse_in_module_and_two_classes"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseDiscovery(object):\n\n def test_autouse_in_module_and_two_classes(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(autouse=True)\n def append1():\n values.append(\"module\")\n def test_x():\n assert values == [\"module\"]\n\n class TestA(object):\n @pytest.fixture(autouse=True)\n def append2(self):\n values.append(\"A\")\n def test_hello(self):\n assert values == [\"module\", \"module\", \"A\"], values\n class 
TestA2(object):\n def test_world(self):\n assert values == [\"module\", \"module\", \"A\", \"module\"], values\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement_TestAutouseManagement.test_autouse_conftest_mid_directory.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement_TestAutouseManagement.test_autouse_conftest_mid_directory.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1708, "end_line": 1733, "span_ids": ["TestAutouseManagement", "TestAutouseManagement.test_autouse_conftest_mid_directory"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n def test_autouse_conftest_mid_directory(self, testdir):\n pkgdir = testdir.mkpydir(\"xyz123\")\n pkgdir.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture(autouse=True)\n def app():\n import sys\n sys._myapp = \"hello\"\n \"\"\"\n )\n )\n t = pkgdir.ensure(\"tests\", \"test_app.py\")\n t.write(\n textwrap.dedent(\n \"\"\"\\\n import sys\n def test_app():\n assert sys._myapp == \"hello\"\n \"\"\"\n )\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_funcarg_and_setup_TestAutouseManagement.test_funcarg_and_setup.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_funcarg_and_setup_TestAutouseManagement.test_funcarg_and_setup.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1735, "end_line": 1760, "span_ids": ["TestAutouseManagement.test_funcarg_and_setup"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_funcarg_and_setup(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope=\"module\")\n def arg():\n values.append(1)\n return 0\n @pytest.fixture(scope=\"module\", autouse=True)\n def something(arg):\n values.append(2)\n\n def test_hello(arg):\n assert len(values) == 2\n assert values == [1,2]\n assert arg == 0\n\n def 
test_hello2(arg):\n assert len(values) == 2\n assert values == [1,2]\n assert arg == 0\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_uses_parametrized_resource_TestAutouseManagement.test_uses_parametrized_resource.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_uses_parametrized_resource_TestAutouseManagement.test_uses_parametrized_resource.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1762, "end_line": 1786, "span_ids": ["TestAutouseManagement.test_uses_parametrized_resource"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_uses_parametrized_resource(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(params=[1,2])\n def arg(request):\n return request.param\n\n @pytest.fixture(autouse=True)\n def something(arg):\n values.append(arg)\n\n def test_hello():\n if len(values) == 1:\n assert values == [1]\n elif len(values) == 2:\n assert values == [1, 2]\n else:\n 0/0\n\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_session_parametrized_function_TestAutouseManagement.test_session_parametrized_function.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_session_parametrized_function_TestAutouseManagement.test_session_parametrized_function.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1788, "end_line": 1813, "span_ids": ["TestAutouseManagement.test_session_parametrized_function"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_session_parametrized_function(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n values = []\n\n @pytest.fixture(scope=\"session\", params=[1,2])\n def arg(request):\n return request.param\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def append(request, arg):\n if request.function.__name__ == \"test_some\":\n 
values.append(arg)\n\n def test_some():\n pass\n\n def test_result(arg):\n assert len(values) == arg\n assert values[:arg] == [1,2][:arg]\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\", \"-s\")\n reprec.assertoutcome(passed=4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_class_function_parametrization_finalization_TestAutouseManagement.test_class_function_parametrization_finalization.assert_values_fin_a1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_class_function_parametrization_finalization_TestAutouseManagement.test_class_function_parametrization_finalization.assert_values_fin_a1", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1815, "end_line": 1855, "span_ids": ["TestAutouseManagement.test_class_function_parametrization_finalization"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_class_function_parametrization_finalization(self, testdir):\n p = testdir.makeconftest(\n \"\"\"\n import pytest\n import pprint\n\n values = []\n\n @pytest.fixture(scope=\"function\", params=[1,2])\n def farg(request):\n return request.param\n\n @pytest.fixture(scope=\"class\", params=list(\"ab\"))\n def carg(request):\n return request.param\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def append(request, farg, carg):\n def fin():\n values.append(\"fin_%s%s\" % (carg, farg))\n request.addfinalizer(fin)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestClass(object):\n def test_1(self):\n pass\n class TestClass2(object):\n def test_2(self):\n pass\n \"\"\"\n )\n confcut = \"--confcutdir={}\".format(testdir.tmpdir)\n reprec = testdir.inline_run(\"-v\", \"-s\", confcut)\n reprec.assertoutcome(passed=8)\n config = reprec.getcalls(\"pytest_unconfigure\")[0].config\n values = config.pluginmanager._getconftestmodules(p)[0].values\n assert values == [\"fin_a1\", \"fin_a2\", \"fin_b1\", \"fin_b2\"] * 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_scope_ordering_TestAutouseManagement.test_scope_ordering.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_scope_ordering_TestAutouseManagement.test_scope_ordering.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1857, "end_line": 1878, "span_ids": ["TestAutouseManagement.test_scope_ordering"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_scope_ordering(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope=\"function\", autouse=True)\n def fappend2():\n values.append(2)\n @pytest.fixture(scope=\"class\", autouse=True)\n def classappend3():\n values.append(3)\n @pytest.fixture(scope=\"module\", autouse=True)\n def mappend():\n values.append(1)\n\n class TestHallo(object):\n def test_method(self):\n assert values == [1,3,2]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_parametrization_setup_teardown_ordering_TestAutouseManagement.test_parametrization_setup_teardown_ordering.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_parametrization_setup_teardown_ordering_TestAutouseManagement.test_parametrization_setup_teardown_ordering.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1880, "end_line": 1907, "span_ids": ["TestAutouseManagement.test_parametrization_setup_teardown_ordering"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_parametrization_setup_teardown_ordering(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n def pytest_generate_tests(metafunc):\n if metafunc.cls is None:\n assert metafunc.function is test_finish\n if metafunc.cls is not None:\n metafunc.parametrize(\"item\", [1,2], scope=\"class\")\n class TestClass(object):\n @pytest.fixture(scope=\"class\", autouse=True)\n def addteardown(self, item, request):\n values.append(\"setup-%d\" % item)\n request.addfinalizer(lambda: values.append(\"teardown-%d\" % item))\n def test_step1(self, item):\n values.append(\"step1-%d\" % item)\n def test_step2(self, item):\n values.append(\"step2-%d\" % item)\n\n def test_finish():\n print(values)\n assert values == [\"setup-1\", \"step1-1\", \"step2-1\", \"teardown-1\",\n \"setup-2\", \"step1-2\", \"step2-2\", \"teardown-2\",]\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_autouse_before_explicit_TestAutouseManagement.test_ordering_autouse_before_explicit.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_autouse_before_explicit_TestAutouseManagement.test_ordering_autouse_before_explicit.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1909, "end_line": 1926, "span_ids": ["TestAutouseManagement.test_ordering_autouse_before_explicit"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n def test_ordering_autouse_before_explicit(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n values = []\n @pytest.fixture(autouse=True)\n def fix1():\n values.append(1)\n @pytest.fixture()\n def arg1():\n values.append(2)\n def test_hello(arg1):\n assert values == [1,2]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_dependencies_torndown_first_TestAutouseManagement.test_ordering_dependencies_torndown_first.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestAutouseManagement.test_ordering_dependencies_torndown_first_TestAutouseManagement.test_ordering_dependencies_torndown_first.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1928, "end_line": 1953, "span_ids": ["TestAutouseManagement.test_ordering_dependencies_torndown_first"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAutouseManagement(object):\n\n @pytest.mark.issue(226)\n @pytest.mark.parametrize(\"param1\", [\"\", \"params=[1]\"], ids=[\"p00\", \"p01\"])\n @pytest.mark.parametrize(\"param2\", [\"\", \"params=[1]\"], ids=[\"p10\", \"p11\"])\n def test_ordering_dependencies_torndown_first(self, testdir, param1, param2):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(%(param1)s)\n def arg1(request):\n request.addfinalizer(lambda: values.append(\"fin1\"))\n values.append(\"new1\")\n @pytest.fixture(%(param2)s)\n def arg2(request, arg1):\n request.addfinalizer(lambda: values.append(\"fin2\"))\n values.append(\"new2\")\n\n def test_arg(arg2):\n pass\n def test_check():\n assert values == [\"new1\", \"new2\", \"fin2\", \"fin1\"]\n \"\"\"\n % locals()\n )\n reprec = testdir.inline_run(\"-s\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", 
"class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker_TestFixtureMarker.test_multiple_parametrization_issue_736.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker_TestFixtureMarker.test_multiple_parametrization_issue_736.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1956, "end_line": 1990, "span_ids": ["TestFixtureMarker.test_multiple_parametrization_issue_736", "TestFixtureMarker.test_parametrize", "TestFixtureMarker"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n def test_parametrize(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(params=[\"a\", \"b\", \"c\"])\n def arg(request):\n return request.param\n values = []\n def test_param(arg):\n values.append(arg)\n def test_result():\n assert values == list(\"abc\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=4)\n\n def test_multiple_parametrization_issue_736(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1,2,3])\n def foo(request):\n return request.param\n\n @pytest.mark.parametrize('foobar', [4,5,6])\n def test_issue(foo, foobar):\n assert foo in [1,2,3]\n assert foobar in [4,5,6]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=9)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_override_parametrized_fixture_issue_979_TestFixtureMarker.test_override_parametrized_fixture_issue_979.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_override_parametrized_fixture_issue_979_TestFixtureMarker.test_override_parametrized_fixture_issue_979.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1992, "end_line": 2016, "span_ids": ["TestFixtureMarker.test_override_parametrized_fixture_issue_979"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n @pytest.mark.parametrize(\n \"param_args\",\n [\"'fixt, val'\", \"'fixt,val'\", \"['fixt', 'val']\", \"('fixt', 'val')\"],\n )\n def test_override_parametrized_fixture_issue_979(self, testdir, param_args):\n \"\"\"Make sure a parametrized argument can override a parametrized fixture.\n\n This was a regression introduced in the fix for #736.\n 
\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1, 2])\n def fixt(request):\n return request.param\n\n @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')])\n def test_foo(fixt, val):\n pass\n \"\"\"\n % param_args\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_TestFixtureMarker.test_scope_session.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_TestFixtureMarker.test_scope_session.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2018, "end_line": 2040, "span_ids": ["TestFixtureMarker.test_scope_session"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_session(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope=\"module\")\n def arg():\n values.append(1)\n return 1\n\n def test_1(arg):\n assert arg == 1\n def test_2(arg):\n assert arg == 1\n assert len(values) == 1\n class TestClass(object):\n def test3(self, arg):\n assert arg == 1\n assert len(values) == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_TestFixtureMarker.test_scope_session_exc.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_TestFixtureMarker.test_scope_session_exc.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2042, "end_line": 2061, "span_ids": ["TestFixtureMarker.test_scope_session_exc"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_session_exc(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope=\"session\")\n def fix():\n values.append(1)\n pytest.skip('skipping')\n\n def test_1(fix):\n pass\n def test_2(fix):\n pass\n def test_last():\n assert values == [1]\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=2, passed=1)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_two_fix_TestFixtureMarker.test_scope_session_exc_two_fix.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_session_exc_two_fix_TestFixtureMarker.test_scope_session_exc_two_fix.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2063, "end_line": 2087, "span_ids": ["TestFixtureMarker.test_scope_session_exc_two_fix"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_session_exc_two_fix(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n m = []\n @pytest.fixture(scope=\"session\")\n def a():\n values.append(1)\n pytest.skip('skipping')\n @pytest.fixture(scope=\"session\")\n def b(a):\n m.append(1)\n\n def test_1(b):\n pass\n def test_2(b):\n pass\n def test_last():\n assert values == [1]\n assert m == []\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=2, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_exc_TestFixtureMarker.test_scope_exc.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_exc_TestFixtureMarker.test_scope_exc.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2089, "end_line": 2116, "span_ids": ["TestFixtureMarker.test_scope_exc"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_exc(self, testdir):\n testdir.makepyfile(\n test_foo=\"\"\"\n def test_foo(fix):\n pass\n \"\"\",\n test_bar=\"\"\"\n def test_bar(fix):\n pass\n \"\"\",\n conftest=\"\"\"\n import pytest\n reqs = []\n @pytest.fixture(scope=\"session\")\n def fix(request):\n reqs.append(1)\n pytest.skip()\n @pytest.fixture\n def req_list():\n return reqs\n \"\"\",\n test_real=\"\"\"\n def test_last(req_list):\n assert req_list == [1]\n \"\"\",\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=2, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_uses_session_TestFixtureMarker.test_scope_module_uses_session.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_uses_session_TestFixtureMarker.test_scope_module_uses_session.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2118, "end_line": 2140, "span_ids": ["TestFixtureMarker.test_scope_module_uses_session"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_module_uses_session(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(scope=\"module\")\n def arg():\n values.append(1)\n return 1\n\n def test_1(arg):\n assert arg == 1\n def test_2(arg):\n assert arg == 1\n assert len(values) == 1\n class TestClass(object):\n def test3(self, arg):\n assert arg == 1\n assert len(values) == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_and_finalizer_TestFixtureMarker.test_scope_module_and_finalizer.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_module_and_finalizer_TestFixtureMarker.test_scope_module_and_finalizer.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2142, "end_line": 2180, "span_ids": ["TestFixtureMarker.test_scope_module_and_finalizer"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_module_and_finalizer(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n finalized_list = []\n created_list = []\n @pytest.fixture(scope=\"module\")\n def arg(request):\n created_list.append(1)\n assert request.scope == \"module\"\n request.addfinalizer(lambda: finalized_list.append(1))\n @pytest.fixture\n def created(request):\n return len(created_list)\n @pytest.fixture\n def finalized(request):\n return len(finalized_list)\n \"\"\"\n )\n testdir.makepyfile(\n test_mod1=\"\"\"\n def test_1(arg, created, finalized):\n assert created == 1\n assert finalized == 0\n def test_2(arg, created, finalized):\n assert created == 1\n assert finalized == 0\"\"\",\n test_mod2=\"\"\"\n def test_3(arg, 
created, finalized):\n assert created == 2\n assert finalized == 1\"\"\",\n test_mode3=\"\"\"\n def test_4(arg, created, finalized):\n assert created == 3\n assert finalized == 2\n \"\"\",\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_various_TestFixtureMarker.test_scope_mismatch_various.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_various_TestFixtureMarker.test_scope_mismatch_various.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2182, "end_line": 2207, "span_ids": ["TestFixtureMarker.test_scope_mismatch_various"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_mismatch_various(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n finalized = []\n created = []\n @pytest.fixture(scope=\"function\")\n def arg(request):\n pass\n \"\"\"\n )\n testdir.makepyfile(\n test_mod1=\"\"\"\n import pytest\n @pytest.fixture(scope=\"session\")\n def arg(request):\n request.getfixturevalue(\"arg\")\n def test_1(arg):\n pass\n \"\"\"\n )\n result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)\n assert result.ret != 0\n result.stdout.fnmatch_lines(\n [\"*ScopeMismatch*You tried*function*session*request*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_register_only_with_mark_TestFixtureMarker.test_register_only_with_mark.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_register_only_with_mark_TestFixtureMarker.test_register_only_with_mark.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2209, "end_line": 2229, "span_ids": ["TestFixtureMarker.test_register_only_with_mark"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_register_only_with_mark(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture()\n def arg():\n return 1\n \"\"\"\n )\n testdir.makepyfile(\n test_mod1=\"\"\"\n import pytest\n @pytest.fixture()\n def arg(arg):\n return arg + 1\n 
def test_1(arg):\n assert arg == 2\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_and_scope_TestFixtureMarker.test_parametrize_and_scope.assert_c_in_values": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_and_scope_TestFixtureMarker.test_parametrize_and_scope.assert_c_in_values", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2231, "end_line": 2249, "span_ids": ["TestFixtureMarker.test_parametrize_and_scope"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_and_scope(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"module\", params=[\"a\", \"b\", \"c\"])\n def arg(request):\n return request.param\n values = []\n def test_param(arg):\n values.append(arg)\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=3)\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n assert len(values) == 3\n assert \"a\" in values\n assert \"b\" in values\n assert \"c\" in values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_TestFixtureMarker.test_scope_mismatch.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_scope_mismatch_TestFixtureMarker.test_scope_mismatch.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2251, "end_line": 2271, "span_ids": ["TestFixtureMarker.test_scope_mismatch"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_scope_mismatch(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"function\")\n def arg(request):\n pass\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"session\")\n def arg(arg):\n pass\n def test_mismatch(arg):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*ScopeMismatch*\", \"*1 error*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_TestFixtureMarker.test_parametrize_separated_order.assert_values_1_1_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_TestFixtureMarker.test_parametrize_separated_order.assert_values_1_1_2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2273, "end_line": 2292, "span_ids": ["TestFixtureMarker.test_parametrize_separated_order"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_separated_order(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"module\", params=[1, 2])\n def arg(request):\n return request.param\n\n values = []\n def test_1(arg):\n values.append(arg)\n def test_2(arg):\n values.append(arg)\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=4)\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n assert values == [1, 1, 2, 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_module_parametrized_ordering_TestFixtureMarker.test_module_parametrized_ordering.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_module_parametrized_ordering_TestFixtureMarker.test_module_parametrized_ordering.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2294, "end_line": 2351, "span_ids": ["TestFixtureMarker.test_module_parametrized_ordering"], "tokens": 488}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_module_parametrized_ordering(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style=classic\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"session\", params=\"s1 s2\".split())\n def sarg():\n pass\n @pytest.fixture(scope=\"module\", params=\"m1 m2\".split())\n def marg():\n pass\n \"\"\"\n )\n testdir.makepyfile(\n test_mod1=\"\"\"\n def test_func(sarg):\n pass\n def test_func1(marg):\n pass\n \"\"\",\n test_mod2=\"\"\"\n def test_func2(sarg):\n pass\n def test_func3(sarg, marg):\n pass\n def test_func3b(sarg, marg):\n pass\n def test_func4(marg):\n pass\n \"\"\",\n )\n result = 
testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n \"\"\"\n test_mod1.py::test_func[s1] PASSED\n test_mod2.py::test_func2[s1] PASSED\n test_mod2.py::test_func3[s1-m1] PASSED\n test_mod2.py::test_func3b[s1-m1] PASSED\n test_mod2.py::test_func3[s1-m2] PASSED\n test_mod2.py::test_func3b[s1-m2] PASSED\n test_mod1.py::test_func[s2] PASSED\n test_mod2.py::test_func2[s2] PASSED\n test_mod2.py::test_func3[s2-m1] PASSED\n test_mod2.py::test_func3b[s2-m1] PASSED\n test_mod2.py::test_func4[m1] PASSED\n test_mod2.py::test_func3[s2-m2] PASSED\n test_mod2.py::test_func3b[s2-m2] PASSED\n test_mod2.py::test_func4[m2] PASSED\n test_mod1.py::test_func1[m1] PASSED\n test_mod1.py::test_func1[m2] PASSED\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_dynamic_parametrized_ordering_TestFixtureMarker.test_dynamic_parametrized_ordering.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_dynamic_parametrized_ordering_TestFixtureMarker.test_dynamic_parametrized_ordering.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2353, "end_line": 2400, "span_ids": ["TestFixtureMarker.test_dynamic_parametrized_ordering"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_dynamic_parametrized_ordering(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style=classic\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n def pytest_configure(config):\n class DynamicFixturePlugin(object):\n @pytest.fixture(scope='session', params=['flavor1', 'flavor2'])\n def flavor(self, request):\n return request.param\n config.pluginmanager.register(DynamicFixturePlugin(), 'flavor-fixture')\n\n @pytest.fixture(scope='session', params=['vxlan', 'vlan'])\n def encap(request):\n return request.param\n\n @pytest.fixture(scope='session', autouse='True')\n def reprovision(request, flavor, encap):\n pass\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test(reprovision):\n pass\n def test2(reprovision):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n \"\"\"\n test_dynamic_parametrized_ordering.py::test[flavor1-vxlan] PASSED\n test_dynamic_parametrized_ordering.py::test2[flavor1-vxlan] PASSED\n test_dynamic_parametrized_ordering.py::test[flavor2-vxlan] PASSED\n test_dynamic_parametrized_ordering.py::test2[flavor2-vxlan] PASSED\n test_dynamic_parametrized_ordering.py::test[flavor2-vlan] PASSED\n test_dynamic_parametrized_ordering.py::test2[flavor2-vlan] PASSED\n test_dynamic_parametrized_ordering.py::test[flavor1-vlan] PASSED\n test_dynamic_parametrized_ordering.py::test2[flavor1-vlan] PASSED\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_ordering_TestFixtureMarker.test_class_ordering.result_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_ordering_TestFixtureMarker.test_class_ordering.result_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2402, "end_line": 2460, "span_ids": ["TestFixtureMarker.test_class_ordering"], "tokens": 504}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_class_ordering(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style=classic\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n values = []\n\n @pytest.fixture(scope=\"function\", params=[1,2])\n def farg(request):\n return request.param\n\n @pytest.fixture(scope=\"class\", params=list(\"ab\"))\n def carg(request):\n return request.param\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def append(request, farg, carg):\n def fin():\n values.append(\"fin_%s%s\" % (carg, farg))\n request.addfinalizer(fin)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestClass2(object):\n def test_1(self):\n pass\n def test_2(self):\n pass\n class TestClass(object):\n def test_3(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-vs\")\n result.stdout.re_match_lines(\n r\"\"\"\n test_class_ordering.py::TestClass2::test_1\\[a-1\\] PASSED\n test_class_ordering.py::TestClass2::test_1\\[a-2\\] PASSED\n test_class_ordering.py::TestClass2::test_2\\[a-1\\] PASSED\n test_class_ordering.py::TestClass2::test_2\\[a-2\\] PASSED\n test_class_ordering.py::TestClass2::test_1\\[b-1\\] PASSED\n test_class_ordering.py::TestClass2::test_1\\[b-2\\] PASSED\n test_class_ordering.py::TestClass2::test_2\\[b-1\\] PASSED\n test_class_ordering.py::TestClass2::test_2\\[b-2\\] PASSED\n test_class_ordering.py::TestClass::test_3\\[a-1\\] PASSED\n test_class_ordering.py::TestClass::test_3\\[a-2\\] PASSED\n test_class_ordering.py::TestClass::test_3\\[b-1\\] PASSED\n test_class_ordering.py::TestClass::test_3\\[b-2\\] PASSED\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first.assert_values_expected": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first_TestFixtureMarker.test_parametrize_separated_order_higher_scope_first.assert_values_expected", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2462, "end_line": 2536, "span_ids": 
["TestFixtureMarker.test_parametrize_separated_order_higher_scope_first"], "tokens": 501}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_separated_order_higher_scope_first(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"function\", params=[1, 2])\n def arg(request):\n param = request.param\n request.addfinalizer(lambda: values.append(\"fin:%s\" % param))\n values.append(\"create:%s\" % param)\n return request.param\n\n @pytest.fixture(scope=\"module\", params=[\"mod1\", \"mod2\"])\n def modarg(request):\n param = request.param\n request.addfinalizer(lambda: values.append(\"fin:%s\" % param))\n values.append(\"create:%s\" % param)\n return request.param\n\n values = []\n def test_1(arg):\n values.append(\"test1\")\n def test_2(modarg):\n values.append(\"test2\")\n def test_3(arg, modarg):\n values.append(\"test3\")\n def test_4(modarg, arg):\n values.append(\"test4\")\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=12)\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n expected = [\n \"create:1\",\n \"test1\",\n \"fin:1\",\n \"create:2\",\n \"test1\",\n \"fin:2\",\n \"create:mod1\",\n \"test2\",\n \"create:1\",\n \"test3\",\n \"fin:1\",\n \"create:2\",\n \"test3\",\n \"fin:2\",\n \"create:1\",\n \"test4\",\n \"fin:1\",\n \"create:2\",\n \"test4\",\n \"fin:2\",\n \"fin:mod1\",\n \"create:mod2\",\n \"test2\",\n \"create:1\",\n \"test3\",\n \"fin:1\",\n \"create:2\",\n \"test3\",\n \"fin:2\",\n \"create:1\",\n \"test4\",\n \"fin:1\",\n \"create:2\",\n \"test4\",\n \"fin:2\",\n \"fin:mod2\",\n ]\n import pprint\n\n pprint.pprint(list(zip(values, expected)))\n assert values == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrized_fixture_teardown_order_TestFixtureMarker.test_parametrized_fixture_teardown_order.assert_error_not_in_res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrized_fixture_teardown_order_TestFixtureMarker.test_parametrized_fixture_teardown_order.assert_error_not_in_res", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2538, "end_line": 2577, "span_ids": ["TestFixtureMarker.test_parametrized_fixture_teardown_order"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrized_fixture_teardown_order(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(params=[1,2], scope=\"class\")\n def param1(request):\n return request.param\n\n values 
= []\n\n class TestClass(object):\n @classmethod\n @pytest.fixture(scope=\"class\", autouse=True)\n def setup1(self, request, param1):\n values.append(1)\n request.addfinalizer(self.teardown1)\n @classmethod\n def teardown1(self):\n assert values.pop() == 1\n @pytest.fixture(scope=\"class\", autouse=True)\n def setup2(self, request, param1):\n values.append(2)\n request.addfinalizer(self.teardown2)\n @classmethod\n def teardown2(self):\n assert values.pop() == 2\n def test(self):\n pass\n\n def test_finish():\n assert not values\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *3 passed*\n \"\"\"\n )\n assert \"error\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_finalizer_TestFixtureMarker.test_fixture_finalizer.for_test_in_test_browse.reprec_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_finalizer_TestFixtureMarker.test_fixture_finalizer.for_test_in_test_browse.reprec_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2579, "end_line": 2611, "span_ids": ["TestFixtureMarker.test_fixture_finalizer"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_fixture_finalizer(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n import sys\n\n @pytest.fixture\n def browser(request):\n\n def finalize():\n sys.stdout.write('Finalized')\n request.addfinalizer(finalize)\n return {}\n \"\"\"\n )\n b = testdir.mkdir(\"subdir\")\n b.join(\"test_overridden_fixture_finalizer.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def browser(browser):\n browser['visited'] = True\n return browser\n\n def test_browser(browser):\n assert browser['visited'] is True\n \"\"\"\n )\n )\n reprec = testdir.runpytest(\"-s\")\n for test in [\"test_browser\"]:\n reprec.stdout.fnmatch_lines([\"*Finalized*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_with_normal_tests_TestFixtureMarker.test_class_scope_with_normal_tests.for_test_in_test_a_t.assert_reprec_matchreport": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_with_normal_tests_TestFixtureMarker.test_class_scope_with_normal_tests.for_test_in_test_a_t.assert_reprec_matchreport", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2613, "end_line": 2639, "span_ids": ["TestFixtureMarker.test_class_scope_with_normal_tests"], 
"tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_class_scope_with_normal_tests(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import pytest\n\n class Box(object):\n value = 0\n\n @pytest.fixture(scope='class')\n def a(request):\n Box.value += 1\n return Box.value\n\n def test_a(a):\n assert a == 1\n\n class Test1(object):\n def test_b(self, a):\n assert a == 2\n\n class Test2(object):\n def test_c(self, a):\n assert a == 3\"\"\"\n )\n reprec = testdir.inline_run(testpath)\n for test in [\"test_a\", \"test_b\", \"test_c\"]:\n assert reprec.matchreport(test).passed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_request_is_clean_TestFixtureMarker.test_request_is_clean.assert_values_1_2_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_request_is_clean_TestFixtureMarker.test_request_is_clean.assert_values_1_2_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2641, "end_line": 2655, "span_ids": ["TestFixtureMarker.test_request_is_clean"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_request_is_clean(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(params=[1, 2])\n def fix(request):\n request.addfinalizer(lambda: values.append(request.param))\n def test_fix(fix):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\")\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n assert values == [1, 2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_lifecycle_TestFixtureMarker.test_parametrize_separated_lifecycle.assert_values_5_fin2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_separated_lifecycle_TestFixtureMarker.test_parametrize_separated_lifecycle.assert_values_5_fin2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2657, "end_line": 2684, "span_ids": ["TestFixtureMarker.test_parametrize_separated_lifecycle"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_separated_lifecycle(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n values = []\n @pytest.fixture(scope=\"module\", params=[1, 2])\n def arg(request):\n x = request.param\n request.addfinalizer(lambda: values.append(\"fin%s\" % x))\n return request.param\n def test_1(arg):\n values.append(arg)\n def test_2(arg):\n values.append(arg)\n \"\"\"\n )\n reprec = testdir.inline_run(\"-vs\")\n reprec.assertoutcome(passed=4)\n values = reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n import pprint\n\n pprint.pprint(values)\n # assert len(values) == 6\n assert values[0] == values[1] == 1\n assert values[2] == \"fin1\"\n assert values[3] == values[4] == 2\n assert values[5] == \"fin2\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called_TestFixtureMarker.test_parametrize_function_scoped_finalizers_called.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2686, "end_line": 2708, "span_ids": ["TestFixtureMarker.test_parametrize_function_scoped_finalizers_called"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_function_scoped_finalizers_called(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"function\", params=[1, 2])\n def arg(request):\n x = request.param\n request.addfinalizer(lambda: values.append(\"fin%s\" % x))\n return request.param\n\n values = []\n def test_1(arg):\n values.append(arg)\n def test_2(arg):\n values.append(arg)\n def test_3():\n assert len(values) == 8\n assert values == [1, \"fin1\", 2, \"fin2\", 1, \"fin1\", 2, \"fin2\"]\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_finalizer_order_on_parametrization_TestFixtureMarker.test_finalizer_order_on_parametrization.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_finalizer_order_on_parametrization_TestFixtureMarker.test_finalizer_order_on_parametrization.reprec_assertoutcome_pass", "embedding": null, "metadata": 
{"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2710, "end_line": 2745, "span_ids": ["TestFixtureMarker.test_finalizer_order_on_parametrization"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n @pytest.mark.issue(246)\n @pytest.mark.parametrize(\"scope\", [\"session\", \"function\", \"module\"])\n def test_finalizer_order_on_parametrization(self, scope, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n\n @pytest.fixture(scope=%(scope)r, params=[\"1\"])\n def fix1(request):\n return request.param\n\n @pytest.fixture(scope=%(scope)r)\n def fix2(request, base):\n def cleanup_fix2():\n assert not values, \"base should not have been finalized\"\n request.addfinalizer(cleanup_fix2)\n\n @pytest.fixture(scope=%(scope)r)\n def base(request, fix1):\n def cleanup_base():\n values.append(\"fin_base\")\n print(\"finalizing base\")\n request.addfinalizer(cleanup_base)\n\n def test_begin():\n pass\n def test_baz(base, fix2):\n pass\n def test_other():\n pass\n \"\"\"\n % {\"scope\": scope}\n )\n reprec = testdir.inline_run(\"-lvs\")\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_parametrization_ordering_TestFixtureMarker.test_class_scope_parametrization_ordering.assert_values_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_class_scope_parametrization_ordering_TestFixtureMarker.test_class_scope_parametrization_ordering.assert_values_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2747, "end_line": 2784, "span_ids": ["TestFixtureMarker.test_class_scope_parametrization_ordering"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n @pytest.mark.issue(396)\n def test_class_scope_parametrization_ordering(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n @pytest.fixture(params=[\"John\", \"Doe\"], scope=\"class\")\n def human(request):\n request.addfinalizer(lambda: values.append(\"fin %s\" % request.param))\n return request.param\n\n class TestGreetings(object):\n def test_hello(self, human):\n values.append(\"test_hello\")\n\n class TestMetrics(object):\n def test_name(self, human):\n values.append(\"test_name\")\n\n def test_population(self, human):\n values.append(\"test_population\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=6)\n values = 
reprec.getcalls(\"pytest_runtest_call\")[0].item.module.values\n assert values == [\n \"test_hello\",\n \"fin John\",\n \"test_hello\",\n \"fin Doe\",\n \"test_name\",\n \"test_population\",\n \"fin John\",\n \"test_name\",\n \"test_population\",\n \"fin Doe\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_setup_function_TestFixtureMarker.test_parametrize_setup_function.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_parametrize_setup_function_TestFixtureMarker.test_parametrize_setup_function.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2786, "end_line": 2817, "span_ids": ["TestFixtureMarker.test_parametrize_setup_function"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_parametrize_setup_function(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"module\", params=[1, 2])\n def arg(request):\n return request.param\n\n @pytest.fixture(scope=\"module\", autouse=True)\n def mysetup(request, arg):\n request.addfinalizer(lambda: values.append(\"fin%s\" % arg))\n values.append(\"setup%s\" % arg)\n\n values = []\n def test_1(arg):\n values.append(arg)\n def test_2(arg):\n values.append(arg)\n def test_3():\n import pprint\n pprint.pprint(values)\n if arg == 1:\n assert values == [\"setup1\", 1, 1, ]\n elif arg == 2:\n assert values == [\"setup1\", 1, 1, \"fin1\",\n \"setup2\", 2, 2, ]\n\n \"\"\"\n )\n reprec = testdir.inline_run(\"-v\")\n reprec.assertoutcome(passed=6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_marked_function_not_collected_as_test_TestFixtureMarker.test_params_and_ids.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_fixture_marked_function_not_collected_as_test_TestFixtureMarker.test_params_and_ids.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2819, "end_line": 2849, "span_ids": ["TestFixtureMarker.test_params_and_ids", "TestFixtureMarker.test_fixture_marked_function_not_collected_as_test"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_fixture_marked_function_not_collected_as_test(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def test_app():\n return 1\n\n def test_something(test_app):\n assert test_app == 1\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n def test_params_and_ids(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[object(), object()],\n ids=['alpha', 'beta'])\n def fix(request):\n return request.param\n\n def test_foo(fix):\n assert 1\n \"\"\"\n )\n res = testdir.runpytest(\"-v\")\n res.stdout.fnmatch_lines([\"*test_foo*alpha*\", \"*test_foo*beta*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_params_and_ids_yieldfixture_TestFixtureMarker.test_params_and_ids_yieldfixture.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_params_and_ids_yieldfixture_TestFixtureMarker.test_params_and_ids_yieldfixture.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2851, "end_line": 2866, "span_ids": ["TestFixtureMarker.test_params_and_ids_yieldfixture"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureMarker(object):\n\n def test_params_and_ids_yieldfixture(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.yield_fixture(params=[object(), object()],\n ids=['alpha', 'beta'])\n def fix(request):\n yield request.param\n\n def test_foo(fix):\n assert 1\n \"\"\"\n )\n res = testdir.runpytest(\"-v\")\n res.stdout.fnmatch_lines([\"*test_foo*alpha*\", \"*test_foo*beta*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_deterministic_fixture_collection_TestFixtureMarker.test_deterministic_fixture_collection.assert_out1_out2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestFixtureMarker.test_deterministic_fixture_collection_TestFixtureMarker.test_deterministic_fixture_collection.assert_out1_out2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2868, "end_line": 2909, "span_ids": ["TestFixtureMarker.test_deterministic_fixture_collection"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestFixtureMarker(object):\n\n @pytest.mark.issue(920)\n def test_deterministic_fixture_collection(self, testdir, monkeypatch):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"module\",\n params=[\"A\",\n \"B\",\n \"C\"])\n def A(request):\n return request.param\n\n @pytest.fixture(scope=\"module\",\n params=[\"DDDDDDDDD\", \"EEEEEEEEEEEE\", \"FFFFFFFFFFF\", \"banansda\"])\n def B(request, A):\n return request.param\n\n def test_foo(B):\n # Something funky is going on here.\n # Despite specified seeds, on what is collected,\n # sometimes we get unexpected passes. hashing B seems\n # to help?\n assert hash(B) or True\n \"\"\"\n )\n monkeypatch.setenv(\"PYTHONHASHSEED\", \"1\")\n out1 = testdir.runpytest_subprocess(\"-v\")\n monkeypatch.setenv(\"PYTHONHASHSEED\", \"2\")\n out2 = testdir.runpytest_subprocess(\"-v\")\n out1 = [\n line\n for line in out1.outlines\n if line.startswith(\"test_deterministic_fixture_collection.py::test_foo\")\n ]\n out2 = [\n line\n for line in out2.outlines\n if line.startswith(\"test_deterministic_fixture_collection.py::test_foo\")\n ]\n assert len(out1) == 12\n assert out1 == out2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess_TestRequestScopeAccess.test_setup.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess_TestRequestScopeAccess.test_setup.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2912, "end_line": 2942, "span_ids": ["TestRequestScopeAccess", "TestRequestScopeAccess.test_setup"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestScopeAccess(object):\n pytestmark = pytest.mark.parametrize(\n (\"scope\", \"ok\", \"error\"),\n [\n [\"session\", \"\", \"fspath class function module\"],\n [\"module\", \"module fspath\", \"cls function\"],\n [\"class\", \"module fspath cls\", \"function\"],\n [\"function\", \"module fspath cls function\", \"\"],\n ],\n )\n\n def test_setup(self, testdir, scope, ok, error):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=%r, autouse=True)\n def myscoped(request):\n for x in %r:\n assert hasattr(request, x)\n for x in %r:\n pytest.raises(AttributeError, lambda:\n getattr(request, x))\n assert request.session\n assert request.config\n def test_func():\n pass\n \"\"\"\n % (scope, ok.split(), error.split())\n )\n reprec = testdir.inline_run(\"-l\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess.test_funcarg_TestRequestScopeAccess.test_funcarg.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestRequestScopeAccess.test_funcarg_TestRequestScopeAccess.test_funcarg.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2944, "end_line": 2963, "span_ids": ["TestRequestScopeAccess.test_funcarg"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRequestScopeAccess(object):\n\n def test_funcarg(self, testdir, scope, ok, error):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=%r)\n def arg(request):\n for x in %r:\n assert hasattr(request, x)\n for x in %r:\n pytest.raises(AttributeError, lambda:\n getattr(request, x))\n assert request.session\n assert request.config\n def test_func(arg):\n pass\n \"\"\"\n % (scope, ok.split(), error.split())\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors_TestErrors.test_subfactory_missing_funcarg.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors_TestErrors.test_subfactory_missing_funcarg.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 2966, "end_line": 2982, "span_ids": ["TestErrors", "TestErrors.test_subfactory_missing_funcarg"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestErrors(object):\n def test_subfactory_missing_funcarg(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def gen(qwe123):\n return 1\n def test_something(gen):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n result.stdout.fnmatch_lines(\n [\"*def gen(qwe123):*\", \"*fixture*qwe123*not found*\", \"*1 error*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_issue498_fixture_finalizer_failing_TestErrors.test_issue498_fixture_finalizer_failing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_issue498_fixture_finalizer_failing_TestErrors.test_issue498_fixture_finalizer_failing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 2984, "end_line": 3013, "span_ids": ["TestErrors.test_issue498_fixture_finalizer_failing"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestErrors(object):\n\n def test_issue498_fixture_finalizer_failing(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def fix1(request):\n def f():\n raise KeyError\n request.addfinalizer(f)\n return object()\n\n values = []\n def test_1(fix1):\n values.append(fix1)\n def test_2(fix1):\n values.append(fix1)\n def test_3():\n assert values[0] != values[1]\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *ERROR*teardown*test_1*\n *KeyError*\n *ERROR*teardown*test_2*\n *KeyError*\n *3 pass*2 error*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_setupfunc_missing_funcarg_TestErrors.test_setupfunc_missing_funcarg.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestErrors.test_setupfunc_missing_funcarg_TestErrors.test_setupfunc_missing_funcarg.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3015, "end_line": 3030, "span_ids": ["TestErrors.test_setupfunc_missing_funcarg"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestErrors(object):\n\n def test_setupfunc_missing_funcarg(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(autouse=True)\n def gen(qwe123):\n return 1\n def test_something():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n result.stdout.fnmatch_lines(\n [\"*def gen(qwe123):*\", \"*fixture*qwe123*not found*\", \"*1 error*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures_TestShowFixtures.test_show_fixtures_verbose.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures_TestShowFixtures.test_show_fixtures_verbose.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3033, "end_line": 3044, "span_ids": ["TestShowFixtures", "TestShowFixtures.test_funcarg_compat", "TestShowFixtures.test_show_fixtures_verbose", "TestShowFixtures.test_show_fixtures"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n def test_funcarg_compat(self, testdir):\n config = testdir.parseconfigure(\"--funcargs\")\n assert config.option.showfixtures\n\n def test_show_fixtures(self, testdir):\n result = testdir.runpytest(\"--fixtures\")\n result.stdout.fnmatch_lines([\"*tmpdir*\", \"*temporary directory*\"])\n\n def test_show_fixtures_verbose(self, testdir):\n result = testdir.runpytest(\"--fixtures\", \"-v\")\n result.stdout.fnmatch_lines([\"*tmpdir*--*tmpdir.py*\", \"*temporary directory*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_testmodule_TestShowFixtures.test_show_fixtures_testmodule.assert_arg0_not_in_resu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_testmodule_TestShowFixtures.test_show_fixtures_testmodule.assert_arg0_not_in_resu", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3060, "end_line": 3081, "span_ids": ["TestShowFixtures.test_show_fixtures_testmodule"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_testmodule(self, testdir):\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def _arg0():\n \"\"\" hidden \"\"\"\n @pytest.fixture\n def arg1():\n \"\"\" hello world \"\"\"\n '''\n )\n result = testdir.runpytest(\"--fixtures\", p)\n result.stdout.fnmatch_lines(\n \"\"\"\n *tmpdir\n *fixtures defined from*\n *arg1*\n *hello world*\n \"\"\"\n )\n assert \"arg0\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_conftest_TestShowFixtures.test_show_fixtures_conftest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_conftest_TestShowFixtures.test_show_fixtures_conftest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3083, "end_line": 3108, "span_ids": ["TestShowFixtures.test_show_fixtures_conftest"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n @pytest.mark.parametrize(\"testmod\", [True, False])\n def test_show_fixtures_conftest(self, testdir, testmod):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\" hello world \"\"\"\n '''\n )\n if testmod:\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--fixtures\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *tmpdir*\n *fixtures defined from*conftest*\n *arg1*\n *hello world*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_trimmed_doc_TestShowFixtures.test_show_fixtures_trimmed_doc.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_trimmed_doc_TestShowFixtures.test_show_fixtures_trimmed_doc.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3110, "end_line": 3145, "span_ids": ["TestShowFixtures.test_show_fixtures_trimmed_doc"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_trimmed_doc(self, testdir):\n p = testdir.makepyfile(\n textwrap.dedent(\n '''\\\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"\n line1\n line2\n\n \"\"\"\n @pytest.fixture\n def arg2():\n \"\"\"\n line1\n line2\n\n \"\"\"\n '''\n )\n )\n result = testdir.runpytest(\"--fixtures\", p)\n result.stdout.fnmatch_lines(\n textwrap.dedent(\n \"\"\"\\\n * fixtures defined from test_show_fixtures_trimmed_doc *\n arg2\n line1\n line2\n arg1\n line1\n line2\n \"\"\"\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_TestShowFixtures.test_show_fixtures_indented_doc.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_TestShowFixtures.test_show_fixtures_indented_doc.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3147, "end_line": 3171, "span_ids": ["TestShowFixtures.test_show_fixtures_indented_doc"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_indented_doc(self, testdir):\n p = testdir.makepyfile(\n textwrap.dedent(\n '''\\\n import pytest\n @pytest.fixture\n def fixture1():\n \"\"\"\n line1\n indented line\n \"\"\"\n '''\n )\n )\n result = testdir.runpytest(\"--fixtures\", p)\n result.stdout.fnmatch_lines(\n textwrap.dedent(\n \"\"\"\\\n * fixtures defined from test_show_fixtures_indented_doc *\n fixture1\n line1\n indented line\n \"\"\"\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented_TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3173, "end_line": 3198, "span_ids": ["TestShowFixtures.test_show_fixtures_indented_doc_first_line_unindented"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_indented_doc_first_line_unindented(self, testdir):\n p = testdir.makepyfile(\n textwrap.dedent(\n '''\\\n import pytest\n @pytest.fixture\n def fixture1():\n \"\"\"line1\n line2\n indented line\n \"\"\"\n '''\n )\n )\n result = testdir.runpytest(\"--fixtures\", p)\n result.stdout.fnmatch_lines(\n textwrap.dedent(\n \"\"\"\\\n * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented *\n fixture1\n line1\n line2\n indented line\n \"\"\"\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_in_class_TestShowFixtures.test_show_fixtures_indented_in_class.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_indented_in_class_TestShowFixtures.test_show_fixtures_indented_in_class.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3200, "end_line": 3226, "span_ids": ["TestShowFixtures.test_show_fixtures_indented_in_class"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_indented_in_class(self, testdir):\n p = testdir.makepyfile(\n textwrap.dedent(\n '''\\\n import pytest\n class TestClass(object):\n @pytest.fixture\n def fixture1(self):\n \"\"\"line1\n line2\n indented line\n \"\"\"\n '''\n )\n )\n result = testdir.runpytest(\"--fixtures\", p)\n result.stdout.fnmatch_lines(\n textwrap.dedent(\n \"\"\"\\\n * fixtures defined from test_show_fixtures_indented_in_class *\n fixture1\n line1\n line2\n indented line\n \"\"\"\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_different_files_TestShowFixtures.test_show_fixtures_different_files.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_different_files_TestShowFixtures.test_show_fixtures_different_files.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3228, "end_line": 3269, "span_ids": ["TestShowFixtures.test_show_fixtures_different_files"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_different_files(self, testdir):\n \"\"\"\n #833: --fixtures only shows fixtures from first file\n \"\"\"\n testdir.makepyfile(\n test_a='''\n import pytest\n\n @pytest.fixture\n def fix_a():\n \"\"\"Fixture A\"\"\"\n pass\n\n def test_a(fix_a):\n pass\n '''\n )\n testdir.makepyfile(\n test_b='''\n import pytest\n\n @pytest.fixture\n def fix_b():\n \"\"\"Fixture B\"\"\"\n pass\n\n def test_b(fix_b):\n pass\n '''\n )\n result = testdir.runpytest(\"--fixtures\")\n result.stdout.fnmatch_lines(\n \"\"\"\n * fixtures defined from test_a *\n fix_a\n Fixture A\n\n * fixtures defined from test_b *\n fix_b\n Fixture B\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_with_same_name_TestShowFixtures.test_fixture_disallow_twice.with_pytest_raises_ValueE.foo.pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestShowFixtures.test_show_fixtures_with_same_name_TestShowFixtures.test_fixture_disallow_twice.with_pytest_raises_ValueE.foo.pass", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3271, "end_line": 3318, "span_ids": ["TestShowFixtures.test_fixture_disallow_twice", "TestShowFixtures.test_show_fixtures_with_same_name"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestShowFixtures(object):\n\n def test_show_fixtures_with_same_name(self, testdir):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"Hello World in conftest.py\"\"\"\n return \"Hello World\"\n '''\n )\n testdir.makepyfile(\n \"\"\"\n def test_foo(arg1):\n assert arg1 == \"Hello World\"\n \"\"\"\n )\n testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"Hi from test module\"\"\"\n return \"Hi\"\n def test_bar(arg1):\n assert arg1 == \"Hi\"\n '''\n )\n result = testdir.runpytest(\"--fixtures\")\n result.stdout.fnmatch_lines(\n \"\"\"\n * fixtures defined from conftest *\n arg1\n Hello World in conftest.py\n\n * fixtures defined from test_show_fixtures_with_same_name *\n arg1\n Hi from test module\n \"\"\"\n )\n\n def test_fixture_disallow_twice(self):\n \"\"\"Test that applying @pytest.fixture twice generates an error (#2334).\"\"\"\n with pytest.raises(ValueError):\n\n @pytest.fixture\n @pytest.fixture\n def foo():\n pass", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs_TestContextManagerFixtureFuncs.flavor.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs_TestContextManagerFixtureFuncs.flavor.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3321, "end_line": 3337, "span_ids": ["TestContextManagerFixtureFuncs.flavor", "TestContextManagerFixtureFuncs"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestContextManagerFixtureFuncs(object):\n @pytest.fixture(params=[\"fixture\", \"yield_fixture\"])\n def flavor(self, request, testdir, monkeypatch):\n monkeypatch.setenv(\"PYTEST_FIXTURE_FLAVOR\", request.param)\n testdir.makepyfile(\n test_context=\"\"\"\n import os\n import pytest\n import warnings\n VAR = \"PYTEST_FIXTURE_FLAVOR\"\n if VAR not in os.environ:\n warnings.warn(\"PYTEST_FIXTURE_FLAVOR was not set, assuming fixture\")\n fixture = pytest.fixture\n else:\n fixture = getattr(pytest, os.environ[VAR])\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_simple_TestContextManagerFixtureFuncs.test_simple.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_simple_TestContextManagerFixtureFuncs.test_simple.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": 
"fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3339, "end_line": 3366, "span_ids": ["TestContextManagerFixtureFuncs.test_simple"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestContextManagerFixtureFuncs(object):\n\n def test_simple(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from __future__ import print_function\n from test_context import fixture\n @fixture\n def arg1():\n print(\"setup\")\n yield 1\n print(\"teardown\")\n def test_1(arg1):\n print(\"test1\", arg1)\n def test_2(arg1):\n print(\"test2\", arg1)\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *setup*\n *test1 1*\n *teardown*\n *setup*\n *test2 1*\n *teardown*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_scoped_TestContextManagerFixtureFuncs.test_scoped.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_scoped_TestContextManagerFixtureFuncs.test_scoped.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3368, "end_line": 3392, "span_ids": ["TestContextManagerFixtureFuncs.test_scoped"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestContextManagerFixtureFuncs(object):\n\n def test_scoped(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from __future__ import print_function\n from test_context import fixture\n @fixture(scope=\"module\")\n def arg1():\n print(\"setup\")\n yield 1\n print(\"teardown\")\n def test_1(arg1):\n print(\"test1\", arg1)\n def test_2(arg1):\n print(\"test2\", arg1)\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *setup*\n *test1 1*\n *test2 1*\n *teardown*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_setup_exception_TestContextManagerFixtureFuncs.test_teardown_exception.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_setup_exception_TestContextManagerFixtureFuncs.test_teardown_exception.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 3394, "end_line": 3432, "span_ids": ["TestContextManagerFixtureFuncs.test_setup_exception", "TestContextManagerFixtureFuncs.test_teardown_exception"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestContextManagerFixtureFuncs(object):\n\n def test_setup_exception(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from test_context import fixture\n @fixture(scope=\"module\")\n def arg1():\n pytest.fail(\"setup\")\n yield 1\n def test_1(arg1):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *pytest.fail*setup*\n *1 error*\n \"\"\"\n )\n\n def test_teardown_exception(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from test_context import fixture\n @fixture(scope=\"module\")\n def arg1():\n yield 1\n pytest.fail(\"teardown\")\n def test_1(arg1):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *pytest.fail*teardown*\n *1 passed*1 error*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_yields_more_than_one_TestContextManagerFixtureFuncs.test_custom_name.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestContextManagerFixtureFuncs.test_yields_more_than_one_TestContextManagerFixtureFuncs.test_custom_name.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3434, "end_line": 3466, "span_ids": ["TestContextManagerFixtureFuncs.test_yields_more_than_one", "TestContextManagerFixtureFuncs.test_custom_name"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestContextManagerFixtureFuncs(object):\n\n def test_yields_more_than_one(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from test_context import fixture\n @fixture(scope=\"module\")\n def arg1():\n yield 1\n yield 2\n def test_1(arg1):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *fixture function*\n *test_yields*:2*\n \"\"\"\n )\n\n def test_custom_name(self, testdir, flavor):\n testdir.makepyfile(\n \"\"\"\n from test_context import fixture\n @fixture(name='meow')\n def arg1():\n return 'mew'\n def test_1(meow):\n print(meow)\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines([\"*mew*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest_TestParameterizedSubRequest.test_call_from_fixture.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest_TestParameterizedSubRequest.test_call_from_fixture.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3469, "end_line": 3498, "span_ids": ["TestParameterizedSubRequest", "TestParameterizedSubRequest.test_call_from_fixture"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParameterizedSubRequest(object):\n def test_call_from_fixture(self, testdir):\n testdir.makepyfile(\n test_call_from_fixture=\"\"\"\n import pytest\n\n @pytest.fixture(params=[0, 1, 2])\n def fix_with_param(request):\n return request.param\n\n @pytest.fixture\n def get_named_fixture(request):\n return request.getfixturevalue('fix_with_param')\n\n def test_foo(request, get_named_fixture):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"The requested fixture has no parameter defined for test:\",\n \" test_call_from_fixture.py::test_foo\",\n \"Requested fixture 'fix_with_param' defined in:\",\n \"test_call_from_fixture.py:4\",\n \"Requested here:\",\n \"test_call_from_fixture.py:9\",\n \"*1 error in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_call_from_test_TestParameterizedSubRequest.test_call_from_test.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_call_from_test_TestParameterizedSubRequest.test_call_from_test.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3500, "end_line": 3524, "span_ids": ["TestParameterizedSubRequest.test_call_from_test"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParameterizedSubRequest(object):\n\n def test_call_from_test(self, testdir):\n testdir.makepyfile(\n test_call_from_test=\"\"\"\n import pytest\n\n @pytest.fixture(params=[0, 1, 2])\n def fix_with_param(request):\n return request.param\n\n def test_foo(request):\n request.getfixturevalue('fix_with_param')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"The requested fixture has no parameter defined for test:\",\n \" test_call_from_test.py::test_foo\",\n \"Requested fixture 'fix_with_param' defined in:\",\n 
\"test_call_from_test.py:4\",\n \"Requested here:\",\n \"test_call_from_test.py:8\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_external_fixture_TestParameterizedSubRequest.test_external_fixture.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_external_fixture_TestParameterizedSubRequest.test_external_fixture.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3526, "end_line": 3555, "span_ids": ["TestParameterizedSubRequest.test_external_fixture"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParameterizedSubRequest(object):\n\n def test_external_fixture(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[0, 1, 2])\n def fix_with_param(request):\n return request.param\n \"\"\"\n )\n\n testdir.makepyfile(\n test_external_fixture=\"\"\"\n def test_foo(request):\n request.getfixturevalue('fix_with_param')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"The requested fixture has no parameter defined for test:\",\n \" test_external_fixture.py::test_foo\",\n \"\",\n \"Requested fixture 'fix_with_param' defined in:\",\n \"conftest.py:4\",\n \"Requested here:\",\n \"test_external_fixture.py:2\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_non_relative_path_TestParameterizedSubRequest.test_non_relative_path.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestParameterizedSubRequest.test_non_relative_path_TestParameterizedSubRequest.test_non_relative_path.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3557, "end_line": 3599, "span_ids": ["TestParameterizedSubRequest.test_non_relative_path"], "tokens": 264}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParameterizedSubRequest(object):\n\n def test_non_relative_path(self, testdir):\n tests_dir = testdir.mkdir(\"tests\")\n fixdir = testdir.mkdir(\"fixtures\")\n fixfile = fixdir.join(\"fix.py\")\n fixfile.write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture(params=[0, 
1, 2])\n def fix_with_param(request):\n return request.param\n \"\"\"\n )\n )\n\n testfile = tests_dir.join(\"test_foos.py\")\n testfile.write(\n textwrap.dedent(\n \"\"\"\\\n from fix import fix_with_param\n\n def test_foo(request):\n request.getfixturevalue('fix_with_param')\n \"\"\"\n )\n )\n\n tests_dir.chdir()\n testdir.syspathinsert(fixdir)\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"The requested fixture has no parameter defined for test:\",\n \" test_foos.py::test_foo\",\n \"\",\n \"Requested fixture 'fix_with_param' defined in:\",\n \"*fix.py:4\",\n \"Requested here:\",\n \"test_foos.py:4\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_test_pytest_fixture_setup_and_post_finalizer_hook_test_pytest_fixture_setup_and_post_finalizer_hook.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_test_pytest_fixture_setup_and_post_finalizer_hook_test_pytest_fixture_setup_and_post_finalizer_hook.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3602, "end_line": 3645, "span_ids": ["test_pytest_fixture_setup_and_post_finalizer_hook"], "tokens": 379}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_fixture_setup_and_post_finalizer_hook(testdir):\n testdir.makeconftest(\n \"\"\"\n from __future__ import print_function\n def pytest_fixture_setup(fixturedef, request):\n print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))\n def pytest_fixture_post_finalizer(fixturedef, request):\n print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))\n \"\"\"\n )\n testdir.makepyfile(\n **{\n \"tests/conftest.py\": \"\"\"\n from __future__ import print_function\n def pytest_fixture_setup(fixturedef, request):\n print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))\n def pytest_fixture_post_finalizer(fixturedef, request):\n print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))\n \"\"\",\n \"tests/test_hooks.py\": \"\"\"\n from __future__ import print_function\n import pytest\n\n @pytest.fixture()\n def my_fixture():\n return 'some'\n\n def test_func(my_fixture):\n print('TEST test_func')\n assert my_fixture == 'some'\n \"\"\",\n }\n )\n result = testdir.runpytest(\"-s\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\n \"*TESTS setup hook called for my_fixture from test_func*\",\n \"*ROOT setup hook called for my_fixture from test_func*\",\n \"*TEST test_func*\",\n \"*TESTS finalizer hook called for my_fixture from test_func*\",\n \"*ROOT finalizer hook called for my_fixture from test_func*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering_TestScopeOrdering.test_func_closure_module_auto.assert_request_fixturenam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering_TestScopeOrdering.test_func_closure_module_auto.assert_request_fixturenam", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3634, "end_line": 3670, "span_ids": ["TestScopeOrdering", "TestScopeOrdering.test_func_closure_module_auto"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n \"\"\"Class of tests that ensure fixtures are ordered based on their scopes (#2405)\"\"\"\n\n @pytest.mark.parametrize(\"variant\", [\"mark\", \"autouse\"])\n @pytest.mark.issue(github=\"#2405\")\n def test_func_closure_module_auto(self, testdir, variant, monkeypatch):\n \"\"\"Semantically identical to the example posted in #2405 when ``use_mark=True``\"\"\"\n monkeypatch.setenv(\"FIXTURE_ACTIVATION_VARIANT\", variant)\n testdir.makepyfile(\n \"\"\"\n import warnings\n import os\n import pytest\n VAR = 'FIXTURE_ACTIVATION_VARIANT'\n VALID_VARS = ('autouse', 'mark')\n\n VARIANT = os.environ.get(VAR)\n if VARIANT is None or VARIANT not in VALID_VARS:\n warnings.warn(\"{!r} is not in {}, assuming autouse\".format(VARIANT, VALID_VARS) )\n variant = 'mark'\n\n @pytest.fixture(scope='module', autouse=VARIANT == 'autouse')\n def m1(): pass\n\n if VARIANT=='mark':\n pytestmark = pytest.mark.usefixtures('m1')\n\n @pytest.fixture(scope='function', autouse=True)\n def f1(): pass\n\n def test_func(m1):\n pass\n \"\"\"\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n assert request.fixturenames == \"m1 f1\".split()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_with_native_fixtures_TestScopeOrdering.test_func_closure_with_native_fixtures.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_with_native_fixtures_TestScopeOrdering.test_func_closure_with_native_fixtures.None_2", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3685, "end_line": 3737, "span_ids": ["TestScopeOrdering.test_func_closure_with_native_fixtures"], "tokens": 408}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_func_closure_with_native_fixtures(self, testdir, monkeypatch):\n \"\"\"Sanity check that 
verifies the order returned by the closures and the actual fixture execution order:\n The execution order may differ because of fixture inter-dependencies.\n \"\"\"\n monkeypatch.setattr(pytest, \"FIXTURE_ORDER\", [], raising=False)\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n FIXTURE_ORDER = pytest.FIXTURE_ORDER\n\n @pytest.fixture(scope=\"session\")\n def s1():\n FIXTURE_ORDER.append('s1')\n\n @pytest.fixture(scope=\"package\")\n def p1():\n FIXTURE_ORDER.append('p1')\n\n @pytest.fixture(scope=\"module\")\n def m1():\n FIXTURE_ORDER.append('m1')\n\n @pytest.fixture(scope='session')\n def my_tmpdir_factory():\n FIXTURE_ORDER.append('my_tmpdir_factory')\n\n @pytest.fixture\n def my_tmpdir(my_tmpdir_factory):\n FIXTURE_ORDER.append('my_tmpdir')\n\n @pytest.fixture\n def f1(my_tmpdir):\n FIXTURE_ORDER.append('f1')\n\n @pytest.fixture\n def f2():\n FIXTURE_ORDER.append('f2')\n\n def test_foo(f1, p1, m1, f2, s1): pass\n \"\"\"\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n # order of fixtures based on their scope and position in the parameter list\n assert (\n request.fixturenames == \"s1 my_tmpdir_factory p1 m1 f1 f2 my_tmpdir\".split()\n )\n testdir.runpytest()\n # actual fixture execution differs: dependent fixtures must be created first (\"my_tmpdir\")\n assert (\n pytest.FIXTURE_ORDER == \"s1 my_tmpdir_factory p1 m1 my_tmpdir f1 f2\".split()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_module_TestScopeOrdering.test_func_closure_module.assert_request_fixturenam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_module_TestScopeOrdering.test_func_closure_module.assert_request_fixturenam", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3739, "end_line": 3756, "span_ids": ["TestScopeOrdering.test_func_closure_module"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_func_closure_module(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='module')\n def m1(): pass\n\n @pytest.fixture(scope='function')\n def f1(): pass\n\n def test_func(f1, m1):\n pass\n \"\"\"\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n assert request.fixturenames == \"m1 f1\".split()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_scopes_reordered_TestScopeOrdering.test_func_closure_scopes_reordered.assert_request_fixturenam": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_scopes_reordered_TestScopeOrdering.test_func_closure_scopes_reordered.assert_request_fixturenam", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3758, "end_line": 3789, "span_ids": ["TestScopeOrdering.test_func_closure_scopes_reordered"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_func_closure_scopes_reordered(self, testdir):\n \"\"\"Test ensures that fixtures are ordered by scope regardless of the order of the parameters, although\n fixtures of same scope keep the declared order\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session')\n def s1(): pass\n\n @pytest.fixture(scope='module')\n def m1(): pass\n\n @pytest.fixture(scope='function')\n def f1(): pass\n\n @pytest.fixture(scope='function')\n def f2(): pass\n\n class Test:\n\n @pytest.fixture(scope='class')\n def c1(cls): pass\n\n def test_func(self, f2, f1, c1, m1, s1):\n pass\n \"\"\"\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n assert request.fixturenames == \"s1 m1 c1 f2 f1\".split()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_same_scope_closer_root_first_TestScopeOrdering.test_func_closure_same_scope_closer_root_first.assert_request_fixturenam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_same_scope_closer_root_first_TestScopeOrdering.test_func_closure_same_scope_closer_root_first.assert_request_fixturenam", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3791, "end_line": 3829, "span_ids": ["TestScopeOrdering.test_func_closure_same_scope_closer_root_first"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_func_closure_same_scope_closer_root_first(self, testdir):\n \"\"\"Auto-use fixtures of same scope are ordered by closer-to-root first\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='module', autouse=True)\n def m_conf(): pass\n \"\"\"\n )\n testdir.makepyfile(\n **{\n \"sub/conftest.py\": \"\"\"\n import pytest\n\n @pytest.fixture(scope='package', autouse=True)\n def p_sub(): pass\n\n @pytest.fixture(scope='module', autouse=True)\n def m_sub(): pass\n \"\"\",\n \"sub/__init__.py\": \"\",\n \"sub/test_func.py\": \"\"\"\n import pytest\n\n 
@pytest.fixture(scope='module', autouse=True)\n def m_test(): pass\n\n @pytest.fixture(scope='function')\n def f1(): pass\n\n def test_func(m_test, f1):\n pass\n \"\"\",\n }\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n assert request.fixturenames == \"p_sub m_conf m_sub m_test f1\".split()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_all_scopes_complex_TestScopeOrdering.test_func_closure_all_scopes_complex.assert_request_fixturenam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_func_closure_all_scopes_complex_TestScopeOrdering.test_func_closure_all_scopes_complex.assert_request_fixturenam", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3831, "end_line": 3873, "span_ids": ["TestScopeOrdering.test_func_closure_all_scopes_complex"], "tokens": 273}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_func_closure_all_scopes_complex(self, testdir):\n \"\"\"Complex test involving all scopes and mixing autouse with normal fixtures\"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session')\n def s1(): pass\n\n @pytest.fixture(scope='package', autouse=True)\n def p1(): pass\n \"\"\"\n )\n testdir.makepyfile(**{\"__init__.py\": \"\"})\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='module', autouse=True)\n def m1(): pass\n\n @pytest.fixture(scope='module')\n def m2(s1): pass\n\n @pytest.fixture(scope='function')\n def f1(): pass\n\n @pytest.fixture(scope='function')\n def f2(): pass\n\n class Test:\n\n @pytest.fixture(scope='class', autouse=True)\n def c1(self):\n pass\n\n def test_func(self, f2, f1, m2):\n pass\n \"\"\"\n )\n items, _ = testdir.inline_genitems()\n request = FixtureRequest(items[0])\n assert request.fixturenames == \"s1 p1 m1 m2 c1 f2 f1\".split()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_multiple_packages_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/fixtures.py_TestScopeOrdering.test_multiple_packages_", "embedding": null, "metadata": {"file_path": "testing/python/fixtures.py", "file_name": "fixtures.py", "file_type": "text/x-python", "category": "implementation", "start_line": 3875, "end_line": 3953, "span_ids": ["test_call_fixture_function_error", "TestScopeOrdering.test_multiple_packages"], "tokens": 503}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestScopeOrdering(object):\n\n def test_multiple_packages(self, testdir):\n \"\"\"Complex test involving multiple package fixtures. Make sure teardowns\n are executed in order.\n .\n \u2514\u2500\u2500 root\n \u251c\u2500\u2500 __init__.py\n \u251c\u2500\u2500 sub1\n \u2502 \u251c\u2500\u2500 __init__.py\n \u2502 \u251c\u2500\u2500 conftest.py\n \u2502 \u2514\u2500\u2500 test_1.py\n \u2514\u2500\u2500 sub2\n \u251c\u2500\u2500 __init__.py\n \u251c\u2500\u2500 conftest.py\n \u2514\u2500\u2500 test_2.py\n \"\"\"\n root = testdir.mkdir(\"root\")\n root.join(\"__init__.py\").write(\"values = []\")\n sub1 = root.mkdir(\"sub1\")\n sub1.ensure(\"__init__.py\")\n sub1.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n from .. import values\n @pytest.fixture(scope=\"package\")\n def fix():\n values.append(\"pre-sub1\")\n yield values\n assert values.pop() == \"pre-sub1\"\n \"\"\"\n )\n )\n sub1.join(\"test_1.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. import values\n def test_1(fix):\n assert values == [\"pre-sub1\"]\n \"\"\"\n )\n )\n sub2 = root.mkdir(\"sub2\")\n sub2.ensure(\"__init__.py\")\n sub2.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n from .. import values\n @pytest.fixture(scope=\"package\")\n def fix():\n values.append(\"pre-sub2\")\n yield values\n assert values.pop() == \"pre-sub2\"\n \"\"\"\n )\n )\n sub2.join(\"test_2.py\").write(\n textwrap.dedent(\n \"\"\"\\\n from .. import values\n def test_2(fix):\n assert values == [\"pre-sub2\"]\n \"\"\"\n )\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)\n\n\ndef test_call_fixture_function_error():\n \"\"\"Check if an error is raised if a fixture function is called directly (#4545)\"\"\"\n\n @pytest.fixture\n def fix():\n return 1\n\n with pytest.raises(pytest.fail.Exception):\n assert fix() == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_pytest_TestOEJSKITSpecials.test_funcarg_non_pycollectobj.assert_clscol_funcargs_a": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_pytest_TestOEJSKITSpecials.test_funcarg_non_pycollectobj.assert_clscol_funcargs_a", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 35, "span_ids": ["imports", "TestOEJSKITSpecials.test_funcarg_non_pycollectobj", "TestOEJSKITSpecials"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nfrom _pytest import python\nfrom _pytest import runner\n\n\nclass TestOEJSKITSpecials(object):\n def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_pycollect_makeitem(collector, name, obj):\n if name == \"MyClass\":\n return MyCollector(name, parent=collector)\n class MyCollector(pytest.Collector):\n def reportinfo(self):\n return self.fspath, 3, 
\"xyz\"\n \"\"\"\n )\n modcol = testdir.getmodulecol(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg1(request):\n return 42\n class MyClass(object):\n pass\n \"\"\"\n )\n # this hook finds funcarg factories\n rep = runner.collect_one_node(collector=modcol)\n clscol = rep.result[0]\n clscol.obj = lambda arg1: None\n clscol.funcargs = {}\n pytest._fillfuncargs(clscol)\n assert clscol.funcargs[\"arg1\"] == 42", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestOEJSKITSpecials.test_autouse_fixture_test_wrapped_getfslineno.assert_lineno_lineno2_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestOEJSKITSpecials.test_autouse_fixture_test_wrapped_getfslineno.assert_lineno_lineno2_", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 37, "end_line": 86, "span_ids": ["test_wrapped_getfslineno", "TestOEJSKITSpecials.test_autouse_fixture"], "tokens": 337}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOEJSKITSpecials(object):\n\n def test_autouse_fixture(self, testdir): # rough jstests usage\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_pycollect_makeitem(collector, name, obj):\n if name == \"MyClass\":\n return MyCollector(name, parent=collector)\n class MyCollector(pytest.Collector):\n def reportinfo(self):\n return self.fspath, 3, \"xyz\"\n \"\"\"\n )\n modcol = testdir.getmodulecol(\n \"\"\"\n import pytest\n @pytest.fixture(autouse=True)\n def hello():\n pass\n @pytest.fixture\n def arg1(request):\n return 42\n class MyClass(object):\n pass\n \"\"\"\n )\n # this hook finds funcarg factories\n rep = runner.collect_one_node(modcol)\n clscol = rep.result[0]\n clscol.obj = lambda: None\n clscol.funcargs = {}\n pytest._fillfuncargs(clscol)\n assert not clscol.funcargs\n\n\ndef test_wrapped_getfslineno():\n def func():\n pass\n\n def wrap(f):\n func.__wrapped__ = f\n func.patchings = [\"qwe\"]\n return func\n\n @wrap\n def wrapped_func(x, y, z):\n pass\n\n fs, lineno = python.getfslineno(wrapped_func)\n fs2, lineno2 = python.getfslineno(wrap)\n assert lineno > lineno2, \"getfslineno does not unwrap correctly\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration_TestMockDecoration.test_unittest_mock.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration_TestMockDecoration.test_unittest_mock.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 89, "end_line": 142, "span_ids": ["TestMockDecoration.test_wrapped_getfuncargnames", "TestMockDecoration", 
"TestMockDecoration.test_wrapped_getfuncargnames_patching", "TestMockDecoration.test_unittest_mock"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n def test_wrapped_getfuncargnames(self):\n from _pytest.compat import getfuncargnames\n\n def wrap(f):\n def func():\n pass\n\n func.__wrapped__ = f\n return func\n\n @wrap\n def f(x):\n pass\n\n values = getfuncargnames(f)\n assert values == (\"x\",)\n\n @pytest.mark.xfail(\n strict=False, reason=\"getfuncargnames breaks if mock is imported\"\n )\n def test_wrapped_getfuncargnames_patching(self):\n from _pytest.compat import getfuncargnames\n\n def wrap(f):\n def func():\n pass\n\n func.__wrapped__ = f\n func.patchings = [\"qwe\"]\n return func\n\n @wrap\n def f(x, y, z):\n pass\n\n values = getfuncargnames(f)\n assert values == (\"y\", \"z\")\n\n def test_unittest_mock(self, testdir):\n pytest.importorskip(\"unittest.mock\")\n testdir.makepyfile(\n \"\"\"\n import unittest.mock\n class T(unittest.TestCase):\n @unittest.mock.patch(\"os.path.abspath\")\n def test_hello(self, abspath):\n import os\n os.path.abspath(\"hello\")\n abspath.assert_any_call(\"hello\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_fixture_TestMockDecoration.test_unittest_mock_and_fixture.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_fixture_TestMockDecoration.test_unittest_mock_and_fixture.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 144, "end_line": 164, "span_ids": ["TestMockDecoration.test_unittest_mock_and_fixture"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n\n def test_unittest_mock_and_fixture(self, testdir):\n pytest.importorskip(\"unittest.mock\")\n testdir.makepyfile(\n \"\"\"\n import os.path\n import unittest.mock\n import pytest\n\n @pytest.fixture\n def inject_me():\n pass\n\n @unittest.mock.patch.object(os.path, \"abspath\",\n new=unittest.mock.MagicMock)\n def test_hello(inject_me):\n import os\n os.path.abspath(\"hello\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_pypi_mock_TestMockDecoration.test_unittest_mock_and_pypi_mock.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_unittest_mock_and_pypi_mock_TestMockDecoration.test_unittest_mock_and_pypi_mock.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 166, "end_line": 188, "span_ids": ["TestMockDecoration.test_unittest_mock_and_pypi_mock"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n\n def test_unittest_mock_and_pypi_mock(self, testdir):\n pytest.importorskip(\"unittest.mock\")\n pytest.importorskip(\"mock\", \"1.0.1\")\n testdir.makepyfile(\n \"\"\"\n import mock\n import unittest.mock\n class TestBoth(object):\n @unittest.mock.patch(\"os.path.abspath\")\n def test_hello(self, abspath):\n import os\n os.path.abspath(\"hello\")\n abspath.assert_any_call(\"hello\")\n\n @mock.patch(\"os.path.abspath\")\n def test_hello_mock(self, abspath):\n import os\n os.path.abspath(\"hello\")\n abspath.assert_any_call(\"hello\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_TestMockDecoration.test_mock.assert_funcnames_T_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_TestMockDecoration.test_mock.assert_funcnames_T_t", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 190, "end_line": 221, "span_ids": ["TestMockDecoration.test_mock"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n\n def test_mock(self, testdir):\n pytest.importorskip(\"mock\", \"1.0.1\")\n testdir.makepyfile(\n \"\"\"\n import os\n import unittest\n import mock\n\n class T(unittest.TestCase):\n @mock.patch(\"os.path.abspath\")\n def test_hello(self, abspath):\n os.path.abspath(\"hello\")\n abspath.assert_any_call(\"hello\")\n def mock_basename(path):\n return \"mock_basename\"\n @mock.patch(\"os.path.abspath\")\n @mock.patch(\"os.path.normpath\")\n @mock.patch(\"os.path.basename\", new=mock_basename)\n def test_someting(normpath, abspath, tmpdir):\n abspath.return_value = \"this\"\n os.path.normpath(os.path.abspath(\"hello\"))\n normpath.assert_any_call(\"this\")\n assert os.path.basename(\"123\") == 
\"mock_basename\"\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)\n calls = reprec.getcalls(\"pytest_runtest_logreport\")\n funcnames = [\n call.report.location[2] for call in calls if call.report.when == \"call\"\n ]\n assert funcnames == [\"T.test_hello\", \"test_someting\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_sorting_TestMockDecoration.test_mock_sorting.assert_names_test_on": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_sorting_TestMockDecoration.test_mock_sorting.assert_names_test_on", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 223, "end_line": 245, "span_ids": ["TestMockDecoration.test_mock_sorting"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n\n def test_mock_sorting(self, testdir):\n pytest.importorskip(\"mock\", \"1.0.1\")\n testdir.makepyfile(\n \"\"\"\n import os\n import mock\n\n @mock.patch(\"os.path.abspath\")\n def test_one(abspath):\n pass\n @mock.patch(\"os.path.abspath\")\n def test_two(abspath):\n pass\n @mock.patch(\"os.path.abspath\")\n def test_three(abspath):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n calls = reprec.getreports(\"pytest_runtest_logreport\")\n calls = [x for x in calls if x.when == \"call\"]\n names = [x.nodeid.split(\"::\")[-1] for x in calls]\n assert names == [\"test_one\", \"test_two\", \"test_three\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_double_patch_issue473_TestMockDecoration.test_mock_double_patch_issue473.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestMockDecoration.test_mock_double_patch_issue473_TestMockDecoration.test_mock_double_patch_issue473.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 247, "end_line": 263, "span_ids": ["TestMockDecoration.test_mock_double_patch_issue473"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMockDecoration(object):\n\n def test_mock_double_patch_issue473(self, testdir):\n pytest.importorskip(\"mock\", \"1.0.1\")\n testdir.makepyfile(\n \"\"\"\n from mock import patch\n from pytest import mark\n\n 
@patch('os.getcwd')\n @patch('os.path')\n @mark.slow\n class TestSimple(object):\n def test_simple_thing(self, mock_path, mock_getcwd):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestReRunTests_TestReRunTests.test_rerun.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestReRunTests_TestReRunTests.test_rerun.None_3", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 266, "end_line": 303, "span_ids": ["TestReRunTests.test_rerun", "TestReRunTests"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReRunTests(object):\n def test_rerun(self, testdir):\n testdir.makeconftest(\n \"\"\"\n from _pytest.runner import runtestprotocol\n def pytest_runtest_protocol(item, nextitem):\n runtestprotocol(item, log=False, nextitem=nextitem)\n runtestprotocol(item, log=True, nextitem=nextitem)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n count = 0\n req = None\n @pytest.fixture\n def fix(request):\n global count, req\n assert request != req\n req = request\n print(\"fix count %s\" % count)\n count += 1\n def test_fix(fix):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *fix count 0*\n *fix count 1*\n \"\"\"\n )\n result.stdout.fnmatch_lines(\n \"\"\"\n *2 passed*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_test_pytestconfig_is_session_scoped_TestNoselikeTestAttribute.test_class_and_method.assert_not_calls": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_test_pytestconfig_is_session_scoped_TestNoselikeTestAttribute.test_class_and_method.assert_not_calls", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 306, "end_line": 343, "span_ids": ["TestNoselikeTestAttribute.test_module_with_global_test", "TestNoselikeTestAttribute.test_class_and_method", "test_pytestconfig_is_session_scoped", "TestNoselikeTestAttribute"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytestconfig_is_session_scoped():\n from _pytest.fixtures import pytestconfig\n\n assert pytestconfig._pytestfixturefunction.scope == \"session\"\n\n\nclass TestNoselikeTestAttribute(object):\n def 
test_module_with_global_test(self, testdir):\n testdir.makepyfile(\n \"\"\"\n __test__ = False\n def test_hello():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n calls = reprec.getreports(\"pytest_runtest_logreport\")\n assert not calls\n\n def test_class_and_method(self, testdir):\n testdir.makepyfile(\n \"\"\"\n __test__ = True\n def test_func():\n pass\n test_func.__test__ = False\n\n class TestSome(object):\n __test__ = False\n def test_method(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n calls = reprec.getreports(\"pytest_runtest_logreport\")\n assert not calls", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_unittest_class_TestNoselikeTestAttribute.test_unittest_class.assert_call_items_0_cls_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_unittest_class_TestNoselikeTestAttribute.test_unittest_class.assert_call_items_0_cls_", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 345, "end_line": 362, "span_ids": ["TestNoselikeTestAttribute.test_unittest_class"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNoselikeTestAttribute(object):\n\n def test_unittest_class(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n class TC(unittest.TestCase):\n def test_1(self):\n pass\n class TC2(unittest.TestCase):\n __test__ = False\n def test_2(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n call = reprec.getcalls(\"pytest_collection_modifyitems\")[0]\n assert len(call.items) == 1\n assert call.items[0].cls.__name__ == \"TC\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_class_with_nasty_getattr_TestNoselikeTestAttribute.test_class_with_nasty_getattr.assert_not_call_items": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestNoselikeTestAttribute.test_class_with_nasty_getattr_TestNoselikeTestAttribute.test_class_with_nasty_getattr.assert_not_call_items", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 364, "end_line": 393, "span_ids": ["TestNoselikeTestAttribute.test_class_with_nasty_getattr"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNoselikeTestAttribute(object):\n\n def test_class_with_nasty_getattr(self, testdir):\n \"\"\"Make sure we handle classes with a custom nasty __getattr__ right.\n\n With a custom __getattr__ which e.g. returns a function (like with a\n RPC wrapper), we shouldn't assume this meant \"__test__ = True\".\n \"\"\"\n # https://github.com/pytest-dev/pytest/issues/1204\n testdir.makepyfile(\n \"\"\"\n class MetaModel(type):\n\n def __getattr__(cls, key):\n return lambda: None\n\n\n BaseModel = MetaModel('Model', (), {})\n\n\n class Model(BaseModel):\n\n __metaclass__ = MetaModel\n\n def test_blah(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n call = reprec.getcalls(\"pytest_collection_modifyitems\")[0]\n assert not call.items", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize_TestParameterize.test_idfn_marker.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize_TestParameterize.test_idfn_marker.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 396, "end_line": 417, "span_ids": ["TestParameterize", "TestParameterize.test_idfn_marker"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(351)\nclass TestParameterize(object):\n def test_idfn_marker(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def idfn(param):\n if param == 0:\n return 'spam'\n elif param == 1:\n return 'ham'\n else:\n return None\n\n @pytest.mark.parametrize('a,b', [(0, 2), (1, 2)], ids=idfn)\n def test_params(a, b):\n pass\n \"\"\"\n )\n res = testdir.runpytest(\"--collect-only\")\n res.stdout.fnmatch_lines([\"*spam-2*\", \"*ham-2*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize.test_idfn_fixture_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/integration.py_TestParameterize.test_idfn_fixture_", "embedding": null, "metadata": {"file_path": "testing/python/integration.py", "file_name": "integration.py", "file_type": "text/x-python", "category": "implementation", "start_line": 419, "end_line": 446, "span_ids": ["TestParameterize.test_idfn_fixture"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(351)\nclass 
TestParameterize(object):\n\n def test_idfn_fixture(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def idfn(param):\n if param == 0:\n return 'spam'\n elif param == 1:\n return 'ham'\n else:\n return None\n\n @pytest.fixture(params=[0, 1], ids=idfn)\n def a(request):\n return request.param\n\n @pytest.fixture(params=[1, 2], ids=idfn)\n def b(request):\n return request.param\n\n def test_params(a, b):\n pass\n \"\"\"\n )\n res = testdir.runpytest(\"--collect-only\")\n res.stdout.fnmatch_lines([\"*spam-2*\", \"*ham-2*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py__coding_utf_8__TestMetafunc.test_function_basic.assert_metafunc_cls_is_No": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py__coding_utf_8__TestMetafunc.test_function_basic.assert_metafunc_cls_is_No", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 55, "span_ids": ["impl", "TestMetafunc.test_no_funcargs", "TestMetafunc.test_function_basic", "TestMetafunc.Metafunc", "TestMetafunc.Metafunc.FixtureInfo:2", "TestMetafunc.Metafunc.DefinitionMock", "docstring", "TestMetafunc", "TestMetafunc.Metafunc.FixtureInfo", "imports", "TestMetafunc.Metafunc.DefinitionMock:2"], "tokens": 324}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nimport re\nimport sys\nimport textwrap\n\nimport attr\nimport hypothesis\nimport six\nfrom hypothesis import strategies\n\nimport pytest\nfrom _pytest import fixtures\nfrom _pytest import python\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\nPY3 = sys.version_info >= (3, 0)\n\n\nclass TestMetafunc(object):\n def Metafunc(self, func, config=None):\n # the unit tests of this class check if things work correctly\n # on the funcarg level, so we don't need a full blown\n # initiliazation\n class FixtureInfo(object):\n name2fixturedefs = None\n\n def __init__(self, names):\n self.names_closure = names\n\n @attr.s\n class DefinitionMock(object):\n obj = attr.ib()\n\n names = fixtures.getfuncargnames(func)\n fixtureinfo = FixtureInfo(names)\n definition = DefinitionMock(func)\n return python.Metafunc(definition, fixtureinfo, config)\n\n def test_no_funcargs(self, testdir):\n def function():\n pass\n\n metafunc = self.Metafunc(function)\n assert not metafunc.fixturenames\n repr(metafunc._calls)\n\n def test_function_basic(self):\n def func(arg1, arg2=\"qwe\"):\n pass\n\n metafunc = self.Metafunc(func)\n assert len(metafunc.fixturenames) == 1\n assert \"arg1\" in metafunc.fixturenames\n assert metafunc.function is func\n assert metafunc.cls is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_error_TestMetafunc.test_parametrize_bad_scope.with_pytest_raises_.metafunc_parametrize_x_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_error_TestMetafunc.test_parametrize_bad_scope.with_pytest_raises_.metafunc_parametrize_x_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 57, "end_line": 78, "span_ids": ["TestMetafunc.test_parametrize_error", "TestMetafunc.test_parametrize_bad_scope"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_error(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x\", [1, 2])\n pytest.raises(ValueError, lambda: metafunc.parametrize(\"x\", [5, 6]))\n pytest.raises(ValueError, lambda: metafunc.parametrize(\"x\", [5, 6]))\n metafunc.parametrize(\"y\", [1, 2])\n pytest.raises(ValueError, lambda: metafunc.parametrize(\"y\", [5, 6]))\n pytest.raises(ValueError, lambda: metafunc.parametrize(\"y\", [5, 6]))\n\n def test_parametrize_bad_scope(self, testdir):\n def func(x):\n pass\n\n metafunc = self.Metafunc(func)\n with pytest.raises(\n pytest.fail.Exception,\n match=r\"parametrize\\(\\) call in func got an unexpected scope value 'doggy'\",\n ):\n metafunc.parametrize(\"x\", [1], scope=\"doggy\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_find_parametrized_scope_TestMetafunc.test_find_parametrized_scope.None_12": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_find_parametrized_scope_TestMetafunc.test_find_parametrized_scope.None_12", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 80, "end_line": 124, "span_ids": ["TestMetafunc.test_find_parametrized_scope.DummyFixtureDef", "TestMetafunc.test_find_parametrized_scope.DummyFixtureDef:2", "TestMetafunc.test_find_parametrized_scope"], "tokens": 450}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_find_parametrized_scope(self):\n \"\"\"unittest for _find_parametrized_scope (#3941)\"\"\"\n from _pytest.python import _find_parametrized_scope\n\n @attr.s\n class DummyFixtureDef(object):\n scope = attr.ib()\n\n fixtures_defs = dict(\n session_fix=[DummyFixtureDef(\"session\")],\n package_fix=[DummyFixtureDef(\"package\")],\n module_fix=[DummyFixtureDef(\"module\")],\n class_fix=[DummyFixtureDef(\"class\")],\n 
func_fix=[DummyFixtureDef(\"function\")],\n )\n\n # use arguments to determine narrow scope; the cause of the bug is that it would look on all\n # fixture defs given to the method\n def find_scope(argnames, indirect):\n return _find_parametrized_scope(argnames, fixtures_defs, indirect=indirect)\n\n assert find_scope([\"func_fix\"], indirect=True) == \"function\"\n assert find_scope([\"class_fix\"], indirect=True) == \"class\"\n assert find_scope([\"module_fix\"], indirect=True) == \"module\"\n assert find_scope([\"package_fix\"], indirect=True) == \"package\"\n assert find_scope([\"session_fix\"], indirect=True) == \"session\"\n\n assert find_scope([\"class_fix\", \"func_fix\"], indirect=True) == \"function\"\n assert find_scope([\"func_fix\", \"session_fix\"], indirect=True) == \"function\"\n assert find_scope([\"session_fix\", \"class_fix\"], indirect=True) == \"class\"\n assert find_scope([\"package_fix\", \"session_fix\"], indirect=True) == \"package\"\n assert find_scope([\"module_fix\", \"session_fix\"], indirect=True) == \"module\"\n\n # when indirect is False or is not for all scopes, always use function\n assert find_scope([\"session_fix\", \"module_fix\"], indirect=False) == \"function\"\n assert (\n find_scope([\"session_fix\", \"module_fix\"], indirect=[\"module_fix\"])\n == \"function\"\n )\n assert (\n find_scope(\n [\"session_fix\", \"module_fix\"], indirect=[\"session_fix\", \"module_fix\"]\n )\n == \"module\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_and_id_TestMetafunc.test_parametrize_empty_list.assert_skip_metafunc": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_and_id_TestMetafunc.test_parametrize_empty_list.assert_skip_metafunc", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 126, "end_line": 180, "span_ids": ["TestMetafunc.test_parametrize_and_id_unicode", "TestMetafunc.test_parametrize_and_id", "TestMetafunc.test_parametrize_with_wrong_number_of_ids", "TestMetafunc.test_parametrize_empty_list", "TestMetafunc.test_parametrize_empty_list.MockConfig.getini", "TestMetafunc.test_parametrize_empty_list.MockConfig"], "tokens": 409}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_and_id(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n\n metafunc.parametrize(\"x\", [1, 2], ids=[\"basic\", \"advanced\"])\n metafunc.parametrize(\"y\", [\"abc\", \"def\"])\n ids = [x.id for x in metafunc._calls]\n assert ids == [\"basic-abc\", \"basic-def\", \"advanced-abc\", \"advanced-def\"]\n\n def test_parametrize_and_id_unicode(self):\n \"\"\"Allow unicode strings for \"ids\" parameter in Python 2 (##1905)\"\"\"\n\n def func(x):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x\", [1, 2], ids=[u\"basic\", u\"advanced\"])\n ids = [x.id for x in metafunc._calls]\n assert ids == 
[u\"basic\", u\"advanced\"]\n\n def test_parametrize_with_wrong_number_of_ids(self, testdir):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n\n with pytest.raises(pytest.fail.Exception):\n metafunc.parametrize(\"x\", [1, 2], ids=[\"basic\"])\n\n with pytest.raises(pytest.fail.Exception):\n metafunc.parametrize(\n (\"x\", \"y\"), [(\"abc\", \"def\"), (\"ghi\", \"jkl\")], ids=[\"one\"]\n )\n\n @pytest.mark.issue(510)\n def test_parametrize_empty_list(self):\n def func(y):\n pass\n\n class MockConfig(object):\n def getini(self, name):\n return \"\"\n\n @property\n def hook(self):\n return self\n\n def pytest_make_parametrize_id(self, **kw):\n pass\n\n metafunc = self.Metafunc(func, MockConfig())\n metafunc.parametrize(\"y\", [])\n assert \"skip\" == metafunc._calls[0].marks[0].name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_with_userobjects_TestMetafunc.test_parametrize_with_userobjects.assert_metafunc__calls_3_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_with_userobjects_TestMetafunc.test_parametrize_with_userobjects.assert_metafunc__calls_3_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 183, "end_line": 197, "span_ids": ["TestMetafunc.test_parametrize_with_userobjects.A:2", "TestMetafunc.test_parametrize_with_userobjects.A", "TestMetafunc.test_parametrize_with_userobjects"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_with_userobjects(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n\n class A(object):\n pass\n\n metafunc.parametrize(\"x\", [A(), A()])\n metafunc.parametrize(\"y\", list(\"ab\"))\n assert metafunc._calls[0].id == \"x0-a\"\n assert metafunc._calls[1].id == \"x0-b\"\n assert metafunc._calls[2].id == \"x1-a\"\n assert metafunc._calls[3].id == \"x1-b\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idval_hypothesis_TestMetafunc.test_idval_hypothesis.escaped_encode_ascii_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idval_hypothesis_TestMetafunc.test_idval_hypothesis.escaped_encode_ascii_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 199, "end_line": 208, "span_ids": ["TestMetafunc.test_idval_hypothesis"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @hypothesis.given(strategies.text() | strategies.binary())\n @hypothesis.settings(\n deadline=400.0\n ) # very close to std deadline and CI boxes are not reliable in CPU power\n def test_idval_hypothesis(self, value):\n from _pytest.python import _idval\n\n escaped = _idval(value, \"a\", 6, None, item=None, config=None)\n assert isinstance(escaped, six.text_type)\n escaped.encode(\"ascii\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_unicode_idval_TestMetafunc.test_unicode_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_unicode_idval_TestMetafunc.test_unicode_idval.for_val_expected_in_valu.assert__idval_val_a_6", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 210, "end_line": 230, "span_ids": ["TestMetafunc.test_unicode_idval"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_unicode_idval(self):\n \"\"\"This tests that Unicode strings outside the ASCII character set get\n escaped, using byte escapes if they're in that range or unicode\n escapes if they're not.\n\n \"\"\"\n from _pytest.python import _idval\n\n values = [\n (u\"\", \"\"),\n (u\"ascii\", \"ascii\"),\n (u\"a\u00e7\u00e3o\", \"a\\\\xe7\\\\xe3o\"),\n (u\"jos\u00e9@blah.com\", \"jos\\\\xe9@blah.com\"),\n (\n u\"\u03b4\u03bf\u03ba.\u03b9\u03bc\u03ae@\u03c0\u03b1\u03c1\u03ac\u03b4\u03b5\u03b9\u03b3\u03bc\u03b1.\u03b4\u03bf\u03ba\u03b9\u03bc\u03ae\",\n \"\\\\u03b4\\\\u03bf\\\\u03ba.\\\\u03b9\\\\u03bc\\\\u03ae@\\\\u03c0\\\\u03b1\\\\u03c1\\\\u03ac\\\\u03b4\\\\u03b5\\\\u03b9\\\\u03b3\"\n \"\\\\u03bc\\\\u03b1.\\\\u03b4\\\\u03bf\\\\u03ba\\\\u03b9\\\\u03bc\\\\u03ae\",\n ),\n ]\n for val, expected in values:\n assert _idval(val, \"a\", 6, None, item=None, config=None) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_bytes_idval_TestMetafunc.test_bytes_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_bytes_idval_TestMetafunc.test_bytes_idval.for_val_expected_in_valu.assert__idval_val_a_6", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 232, "end_line": 248, "span_ids": ["TestMetafunc.test_bytes_idval"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_bytes_idval(self):\n \"\"\"unittest for the expected behavior to obtain ids for parametrized\n bytes values:\n - python2: non-ascii strings are considered bytes and formatted using\n \"binary escape\", where any byte < 127 is escaped into its hex form.\n - python3: bytes objects are always escaped using \"binary escape\".\n \"\"\"\n from _pytest.python import _idval\n\n values = [\n (b\"\", \"\"),\n (b\"\\xc3\\xb4\\xff\\xe4\", \"\\\\xc3\\\\xb4\\\\xff\\\\xe4\"),\n (b\"ascii\", \"ascii\"),\n (u\"\u03b1\u03c1\u03ac\".encode(\"utf-8\"), \"\\\\xce\\\\xb1\\\\xcf\\\\x81\\\\xce\\\\xac\"),\n ]\n for val, expected in values:\n assert _idval(val, \"a\", 6, idfn=None, item=None, config=None) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_class_or_function_idval_TestMetafunc.test_class_or_function_idval.for_val_expected_in_valu.assert__idval_val_a_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_class_or_function_idval_TestMetafunc.test_class_or_function_idval.for_val_expected_in_valu.assert__idval_val_a_6", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 250, "end_line": 264, "span_ids": ["TestMetafunc.test_class_or_function_idval", "TestMetafunc.test_class_or_function_idval.TestClass", "TestMetafunc.test_class_or_function_idval.TestClass:2"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_class_or_function_idval(self):\n \"\"\"unittest for the expected behavior to obtain ids for parametrized\n values that are classes or functions: their __name__.\n \"\"\"\n from _pytest.python import _idval\n\n class TestClass(object):\n pass\n\n def test_function():\n pass\n\n values = [(TestClass, \"TestClass\"), (test_function, \"test_function\")]\n for val, expected in values:\n assert _idval(val, \"a\", 6, None, item=None, config=None) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_autoname_TestMetafunc.test_idmaker_with_bytes_regex.assert_result_foo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_autoname_TestMetafunc.test_idmaker_with_bytes_regex.assert_result_foo_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 265, "end_line": 286, "span_ids": 
["TestMetafunc.test_idmaker_with_bytes_regex", "TestMetafunc.test_idmaker_autoname"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(250)\n def test_idmaker_autoname(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"a\", \"b\"), [pytest.param(\"string\", 1.0), pytest.param(\"st-ring\", 2.0)]\n )\n assert result == [\"string-1.0\", \"st-ring-2.0\"]\n\n result = idmaker(\n (\"a\", \"b\"), [pytest.param(object(), 1.0), pytest.param(object(), object())]\n )\n assert result == [\"a0-1.0\", \"a1-b1\"]\n # unicode mixing, issue250\n result = idmaker((u\"a\", \"b\"), [pytest.param({}, b\"\\xc3\\xb4\")])\n assert result == [\"a0-\\\\xc3\\\\xb4\"]\n\n def test_idmaker_with_bytes_regex(self):\n from _pytest.python import idmaker\n\n result = idmaker((\"a\"), [pytest.param(re.compile(b\"foo\"), 1.0)])\n assert result == [\"foo\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_native_strings_TestMetafunc.test_idmaker_native_strings.assert_result_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_native_strings_TestMetafunc.test_idmaker_native_strings.assert_result_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 289, "end_line": 322, "span_ids": ["TestMetafunc.test_idmaker_native_strings"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_idmaker_native_strings(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"a\", \"b\"),\n [\n pytest.param(1.0, -1.1),\n pytest.param(2, -202),\n pytest.param(\"three\", \"three hundred\"),\n pytest.param(True, False),\n pytest.param(None, None),\n pytest.param(re.compile(\"foo\"), re.compile(\"bar\")),\n pytest.param(str, int),\n pytest.param(list(\"six\"), [66, 66]),\n pytest.param({7}, set(\"seven\")),\n pytest.param(tuple(\"eight\"), (8, -8, 8)),\n pytest.param(b\"\\xc3\\xb4\", b\"name\"),\n pytest.param(b\"\\xc3\\xb4\", u\"other\"),\n ],\n )\n assert result == [\n \"1.0--1.1\",\n \"2--202\",\n \"three-three hundred\",\n \"True-False\",\n \"None-None\",\n \"foo-bar\",\n \"str-int\",\n \"a7-b7\",\n \"a8-b8\",\n \"a9-b9\",\n \"\\\\xc3\\\\xb4-name\",\n \"\\\\xc3\\\\xb4-other\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_non_printable_characters_TestMetafunc.test_idmaker_non_printable_characters.assert_result_x00_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_non_printable_characters_TestMetafunc.test_idmaker_non_printable_characters.assert_result_x00_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 324, "end_line": 338, "span_ids": ["TestMetafunc.test_idmaker_non_printable_characters"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_idmaker_non_printable_characters(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"s\", \"n\"),\n [\n pytest.param(\"\\x00\", 1),\n pytest.param(\"\\x05\", 2),\n pytest.param(b\"\\x00\", 3),\n pytest.param(b\"\\x05\", 4),\n pytest.param(\"\\t\", 5),\n pytest.param(b\"\\t\", 6),\n ],\n )\n assert result == [\"\\\\x00-1\", \"\\\\x05-2\", \"\\\\x00-3\", \"\\\\x05-4\", \"\\\\t-5\", \"\\\\t-6\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_manual_ids_must_be_printable_TestMetafunc.test_idmaker_enum.assert_result_Foo_on": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_manual_ids_must_be_printable_TestMetafunc.test_idmaker_enum.assert_result_Foo_on", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 340, "end_line": 358, "span_ids": ["TestMetafunc.test_idmaker_enum", "TestMetafunc.test_idmaker_manual_ids_must_be_printable"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_idmaker_manual_ids_must_be_printable(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"s\",),\n [\n pytest.param(\"x00\", id=\"hello \\x00\"),\n pytest.param(\"x05\", id=\"hello \\x05\"),\n ],\n )\n assert result == [\"hello \\\\x00\", \"hello \\\\x05\"]\n\n def test_idmaker_enum(self):\n from _pytest.python import idmaker\n\n enum = pytest.importorskip(\"enum\")\n e = enum.Enum(\"Foo\", \"one, two\")\n result = idmaker((\"a\", \"b\"), [pytest.param(e.one, e.two)])\n assert result == [\"Foo.one-Foo.two\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_TestMetafunc.test_idmaker_idfn.assert_result_10_0_I": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_TestMetafunc.test_idmaker_idfn.assert_result_10_0_I", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 359, "end_line": 376, "span_ids": ["TestMetafunc.test_idmaker_idfn"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(351)\n def test_idmaker_idfn(self):\n from _pytest.python import idmaker\n\n def ids(val):\n if isinstance(val, Exception):\n return repr(val)\n\n result = idmaker(\n (\"a\", \"b\"),\n [\n pytest.param(10.0, IndexError()),\n pytest.param(20, KeyError()),\n pytest.param(\"three\", [1, 2, 3]),\n ],\n idfn=ids,\n )\n assert result == [\"10.0-IndexError()\", \"20-KeyError()\", \"three-b2\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_unique_names_TestMetafunc.test_idmaker_idfn_unique_names.assert_result_a_a0_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_idmaker_idfn_unique_names_TestMetafunc.test_idmaker_idfn_unique_names.assert_result_a_a0_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 378, "end_line": 394, "span_ids": ["TestMetafunc.test_idmaker_idfn_unique_names"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(351)\n def test_idmaker_idfn_unique_names(self):\n from _pytest.python import idmaker\n\n def ids(val):\n return \"a\"\n\n result = idmaker(\n (\"a\", \"b\"),\n [\n pytest.param(10.0, IndexError()),\n pytest.param(20, KeyError()),\n pytest.param(\"three\", [1, 2, 3]),\n ],\n idfn=ids,\n )\n assert result == [\"a-a0\", \"a-a1\", \"a-a2\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_exception_TestMetafunc.test_parametrize_ids_exception.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_exception_TestMetafunc.test_parametrize_ids_exception.result_stdout_fnmatch_lin", 
"embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 397, "end_line": 420, "span_ids": ["TestMetafunc.test_parametrize_ids_exception"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_ids_exception(self, testdir):\n \"\"\"\n :param testdir: the instance of Testdir class, a temporary\n test directory.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def ids(arg):\n raise Exception(\"bad ids\")\n\n @pytest.mark.parametrize(\"arg\", [\"a\", \"b\"], ids=ids)\n def test_foo(arg):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*test_foo: error raised while trying to determine id of parameter 'arg' at position 0\",\n \"*Exception: bad ids\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_returns_non_string_TestMetafunc.test_idmaker_with_ids_unique_names.assert_result_a0_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_ids_returns_non_string_TestMetafunc.test_idmaker_with_ids_unique_names.assert_result_a0_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 422, "end_line": 461, "span_ids": ["TestMetafunc.test_idmaker_with_ids", "TestMetafunc.test_parametrize_ids_returns_non_string", "TestMetafunc.test_idmaker_with_ids_unique_names", "TestMetafunc.test_idmaker_with_paramset_id"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_ids_returns_non_string(self, testdir):\n testdir.makepyfile(\n \"\"\"\\\n import pytest\n\n def ids(d):\n return d\n\n @pytest.mark.parametrize(\"arg\", ({1: 2}, {3, 4}), ids=ids)\n def test(arg):\n assert arg\n \"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n def test_idmaker_with_ids(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"a\", \"b\"), [pytest.param(1, 2), pytest.param(3, 4)], ids=[\"a\", None]\n )\n assert result == [\"a\", \"3-4\"]\n\n def test_idmaker_with_paramset_id(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"a\", \"b\"),\n [pytest.param(1, 2, id=\"me\"), pytest.param(3, 4, id=\"you\")],\n ids=[\"a\", None],\n )\n assert result == [\"me\", \"you\"]\n\n def test_idmaker_with_ids_unique_names(self):\n from _pytest.python import idmaker\n\n result = idmaker(\n (\"a\"), map(pytest.param, [1, 2, 3, 4, 5]), ids=[\"a\", \"a\", \"b\", \"c\", \"b\"]\n )\n assert result == [\"a0\", \"a1\", \"b0\", \"c\", \"b1\"]", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_TestMetafunc.test_parametrize_indirect.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_TestMetafunc.test_parametrize_indirect.None_4", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 462, "end_line": 474, "span_ids": ["TestMetafunc.test_parametrize_indirect"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x\", [1], indirect=True)\n metafunc.parametrize(\"y\", [2, 3], indirect=True)\n assert len(metafunc._calls) == 2\n assert metafunc._calls[0].funcargs == {}\n assert metafunc._calls[1].funcargs == {}\n assert metafunc._calls[0].params == dict(x=1, y=2)\n assert metafunc._calls[1].params == dict(x=1, y=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_TestMetafunc.test_parametrize_indirect_list_empty.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_TestMetafunc.test_parametrize_indirect_list_empty.None_1", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 476, "end_line": 504, "span_ids": ["TestMetafunc.test_parametrize_indirect_list_all", "TestMetafunc.test_parametrize_indirect_list", "TestMetafunc.test_parametrize_indirect_list_empty"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_list(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x, y\", [(\"a\", \"b\")], indirect=[\"x\"])\n assert metafunc._calls[0].funcargs == dict(y=\"b\")\n assert metafunc._calls[0].params == dict(x=\"a\")\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_list_all(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x, y\", [(\"a\", \"b\")], indirect=[\"x\", \"y\"])\n assert metafunc._calls[0].funcargs == {}\n assert metafunc._calls[0].params == dict(x=\"a\", y=\"b\")\n\n 
@pytest.mark.issue(714)\n def test_parametrize_indirect_list_empty(self):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n metafunc.parametrize(\"x, y\", [(\"a\", \"b\")], indirect=[])\n assert metafunc._calls[0].funcargs == dict(x=\"a\", y=\"b\")\n assert metafunc._calls[0].params == {}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_functional_TestMetafunc.test_parametrize_indirect_list_functional.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_functional_TestMetafunc.test_parametrize_indirect_list_functional.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 506, "end_line": 533, "span_ids": ["TestMetafunc.test_parametrize_indirect_list_functional"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_list_functional(self, testdir):\n \"\"\"\n Test parametrization with 'indirect' parameter applied on\n particular arguments. As y is is direct, its value should\n be used directly rather than being passed to the fixture\n y.\n\n :param testdir: the instance of Testdir class, a temporary\n test directory.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='function')\n def x(request):\n return request.param * 3\n @pytest.fixture(scope='function')\n def y(request):\n return request.param * 2\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])\n def test_simple(x,y):\n assert len(x) == 3\n assert len(y) == 1\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines([\"*test_simple*a-b*\", \"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_error_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_false.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_list_error_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_false.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 535, "end_line": 561, "span_ids": ["TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_false", "TestMetafunc.test_parametrize_indirect_list_error"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_list_error(self, testdir):\n def func(x, y):\n pass\n\n metafunc = self.Metafunc(func)\n with pytest.raises(pytest.fail.Exception):\n metafunc.parametrize(\"x, y\", [(\"a\", \"b\")], indirect=[\"x\", \"z\"])\n\n @pytest.mark.issue(714)\n def test_parametrize_uses_no_fixture_error_indirect_false(self, testdir):\n \"\"\"The 'uses no fixture' error tells the user at collection time\n that the parametrize data they've set up doesn't correspond to the\n fixtures in their test function, rather than silently ignoring this\n and letting the test potentially pass.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=False)\n def test_simple(x):\n assert len(x) == 3\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*uses no argument 'y'*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true_TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 563, "end_line": 581, "span_ids": ["TestMetafunc.test_parametrize_uses_no_fixture_error_indirect_true"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_uses_no_fixture_error_indirect_true(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='function')\n def x(request):\n return request.param * 3\n @pytest.fixture(scope='function')\n def y(request):\n return request.param * 2\n\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=True)\n def test_simple(x):\n assert len(x) == 3\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*uses no fixture 'y'*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 583, "end_line": 598, "span_ids": ["TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_string"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_uses_no_fixture_error_indirect_string(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='function')\n def x(request):\n return request.param * 3\n\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect='y')\n def test_simple(x):\n assert len(x) == 3\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*uses no fixture 'y'*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list_TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 600, "end_line": 615, "span_ids": ["TestMetafunc.test_parametrize_indirect_uses_no_fixture_error_indirect_list"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_indirect_uses_no_fixture_error_indirect_list(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='function')\n def x(request):\n return request.param * 3\n\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['y'])\n def test_simple(x):\n assert len(x) == 3\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*uses no fixture 'y'*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_argument_not_in_indirect_list_TestMetafunc.test_parametrize_argument_not_in_indirect_list.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_argument_not_in_indirect_list_TestMetafunc.test_parametrize_argument_not_in_indirect_list.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 617, "end_line": 632, "span_ids": ["TestMetafunc.test_parametrize_argument_not_in_indirect_list"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n @pytest.mark.issue(714)\n def test_parametrize_argument_not_in_indirect_list(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='function')\n def x(request):\n return request.param * 3\n\n @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])\n def test_simple(x):\n assert len(x) == 3\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*uses no argument 'y'*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument_TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 641, "end_line": 656, "span_ids": ["TestMetafunc.test_parametrize_gives_indicative_error_on_function_with_default_argument"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_gives_indicative_error_on_function_with_default_argument(\n self, testdir\n ):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize('x, y', [('a', 'b')])\n def test_simple(x, y=1):\n assert len(x) == 1\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines(\n [\"*already takes an argument 'y' with a default value\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_functional_TestMetafunc.test_parametrize_functional.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_functional_TestMetafunc.test_parametrize_functional.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 658, "end_line": 677, "span_ids": ["TestMetafunc.test_parametrize_functional"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_functional(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('x', [1,2], indirect=True)\n metafunc.parametrize('y', [2])\n @pytest.fixture\n def x(request):\n return request.param * 10\n\n def test_simple(x,y):\n assert x in (10,20)\n assert y == 2\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n [\"*test_simple*1-2*\", \"*test_simple*2-2*\", \"*2 passed*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_onearg_TestMetafunc.test_parametrize_onearg_indirect.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_onearg_TestMetafunc.test_parametrize_onearg_indirect.None_3", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 679, "end_line": 694, "span_ids": ["TestMetafunc.test_parametrize_onearg_indirect", "TestMetafunc.test_parametrize_onearg"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_onearg(self):\n metafunc = self.Metafunc(lambda x: None)\n metafunc.parametrize(\"x\", [1, 2])\n assert len(metafunc._calls) == 2\n assert metafunc._calls[0].funcargs == dict(x=1)\n assert metafunc._calls[0].id == \"1\"\n assert metafunc._calls[1].funcargs == dict(x=2)\n assert metafunc._calls[1].id == \"2\"\n\n def test_parametrize_onearg_indirect(self):\n metafunc = self.Metafunc(lambda x: None)\n metafunc.parametrize(\"x\", [1, 2], indirect=True)\n assert metafunc._calls[0].params == dict(x=1)\n assert metafunc._calls[0].id == \"1\"\n assert metafunc._calls[1].params == dict(x=2)\n assert metafunc._calls[1].id == \"2\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_twoargs_TestMetafunc.test_parametrize_twoargs.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_twoargs_TestMetafunc.test_parametrize_twoargs.None_4", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 696, "end_line": 703, "span_ids": ["TestMetafunc.test_parametrize_twoargs"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_twoargs(self):\n metafunc = self.Metafunc(lambda x, y: None)\n metafunc.parametrize((\"x\", \"y\"), [(1, 2), (3, 4)])\n assert len(metafunc._calls) == 2\n assert metafunc._calls[0].funcargs == dict(x=1, y=2)\n assert metafunc._calls[0].id == \"1-2\"\n assert metafunc._calls[1].funcargs == dict(x=3, y=4)\n assert metafunc._calls[1].id == \"3-4\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_multiple_times_TestMetafunc.test_parametrize_CSV.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_multiple_times_TestMetafunc.test_parametrize_CSV.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 705, "end_line": 732, "span_ids": ["TestMetafunc.test_parametrize_CSV", "TestMetafunc.test_parametrize_multiple_times"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_multiple_times(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n pytestmark = pytest.mark.parametrize(\"x\", [1,2])\n def test_func(x):\n assert 0, x\n class TestClass(object):\n pytestmark = pytest.mark.parametrize(\"y\", [3,4])\n def test_meth(self, x, y):\n assert 0, x\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.assert_outcomes(failed=6)\n\n def test_parametrize_CSV(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize(\"x, y,\", [(1,2), (2,3)])\n def test_func(x, y):\n assert x+1 == y\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_class_scenarios_TestMetafunc.test_parametrize_class_scenarios.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_parametrize_class_scenarios_TestMetafunc.test_parametrize_class_scenarios.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 734, "end_line": 774, "span_ids": ["TestMetafunc.test_parametrize_class_scenarios"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_parametrize_class_scenarios(self, testdir):\n testdir.makepyfile(\n \"\"\"\n # same as doc/en/example/parametrize scenario example\n def pytest_generate_tests(metafunc):\n idlist = []\n argvalues = []\n for scenario in metafunc.cls.scenarios:\n idlist.append(scenario[0])\n items = scenario[1].items()\n argnames = [x[0] for x in items]\n argvalues.append(([x[1] for x in items]))\n metafunc.parametrize(argnames, argvalues, ids=idlist, scope=\"class\")\n\n class Test(object):\n scenarios = [['1', {'arg': {1: 2}, \"arg2\": \"value2\"}],\n ['2', {'arg':'value2', \"arg2\": \"value2\"}]]\n\n def test_1(self, arg, arg2):\n pass\n\n def test_2(self, arg2, arg):\n pass\n\n def test_3(self, arg, arg2):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n \"\"\"\n *test_1*1*\n *test_2*1*\n *test_3*1*\n *test_1*2*\n *test_2*2*\n *test_3*2*\n *6 passed*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_format_args_TestMetafunc.test_format_args.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafunc.test_format_args_TestMetafunc.test_format_args.None_3", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 776, "end_line": 795, "span_ids": ["TestMetafunc.test_format_args"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafunc(object):\n\n def test_format_args(self):\n def function1():\n pass\n\n assert fixtures._format_args(function1) == \"()\"\n\n def function2(arg1):\n pass\n\n assert fixtures._format_args(function2) == \"(arg1)\"\n\n def function3(arg1, arg2=\"qwe\"):\n pass\n\n assert fixtures._format_args(function3) == \"(arg1, arg2='qwe')\"\n\n def function4(arg1, *args, **kwargs):\n pass\n\n assert fixtures._format_args(function4) == \"(arg1, *args, **kwargs)\"", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional_TestMetafuncFunctional.test_attributes.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional_TestMetafuncFunctional.test_attributes.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 798, "end_line": 827, "span_ids": ["TestMetafuncFunctional", "TestMetafuncFunctional.test_attributes"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n def test_attributes(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n # assumes that generate/provide runs in the same process\n import sys, pytest, six\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('metafunc', [metafunc])\n\n @pytest.fixture\n def metafunc(request):\n return request.param\n\n def test_function(metafunc, pytestconfig):\n assert metafunc.config == pytestconfig\n assert metafunc.module.__name__ == __name__\n assert metafunc.function == test_function\n assert metafunc.cls is None\n\n class TestClass(object):\n def test_method(self, metafunc, pytestconfig):\n assert metafunc.config == pytestconfig\n assert metafunc.module.__name__ == __name__\n unbound = six.get_unbound_function(TestClass.test_method)\n assert metafunc.function == unbound\n assert metafunc.cls == TestClass\n \"\"\"\n )\n result = testdir.runpytest(p, \"-v\", SHOW_PYTEST_WARNINGS_ARG)\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_TestMetafuncFunctional.test_two_functions.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_TestMetafuncFunctional.test_two_functions.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 829, "end_line": 851, "span_ids": ["TestMetafuncFunctional.test_two_functions"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_two_functions(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('arg1', [10, 20], ids=['0', '1'])\n\n def test_func1(arg1):\n assert arg1 == 10\n\n def test_func2(arg1):\n assert 
arg1 in (10, 20)\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", p, SHOW_PYTEST_WARNINGS_ARG)\n result.stdout.fnmatch_lines(\n [\n \"*test_func1*0*PASS*\",\n \"*test_func1*1*FAIL*\",\n \"*test_func2*PASS*\",\n \"*test_func2*PASS*\",\n \"*1 failed, 3 passed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_noself_in_method_TestMetafuncFunctional.test_generate_tests_in_class.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_noself_in_method_TestMetafuncFunctional.test_generate_tests_in_class.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 853, "end_line": 879, "span_ids": ["TestMetafuncFunctional.test_noself_in_method", "TestMetafuncFunctional.test_generate_tests_in_class"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_noself_in_method(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n assert 'xyz' not in metafunc.fixturenames\n\n class TestHello(object):\n def test_hello(xyz):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.assert_outcomes(passed=1)\n\n def test_generate_tests_in_class(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n class TestClass(object):\n def pytest_generate_tests(self, metafunc):\n metafunc.parametrize('hello', ['world'], ids=['hellow'])\n\n def test_myfunc(self, hello):\n assert hello == \"world\"\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", p, SHOW_PYTEST_WARNINGS_ARG)\n result.stdout.fnmatch_lines([\"*test_myfunc*hello*PASS*\", \"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_not_same_instance_TestMetafuncFunctional.test_two_functions_not_same_instance.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_two_functions_not_same_instance_TestMetafuncFunctional.test_two_functions_not_same_instance.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 881, "end_line": 896, "span_ids": ["TestMetafuncFunctional.test_two_functions_not_same_instance"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "class TestMetafuncFunctional(object):\n\n def test_two_functions_not_same_instance(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('arg1', [10, 20], ids=[\"0\", \"1\"])\n\n class TestClass(object):\n def test_func(self, arg1):\n assert not hasattr(self, 'x')\n self.x = 1\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", p, SHOW_PYTEST_WARNINGS_ARG)\n result.stdout.fnmatch_lines(\n [\"*test_func*0*PASS*\", \"*test_func*1*PASS*\", \"*2 pass*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests_TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 898, "end_line": 912, "span_ids": ["TestMetafuncFunctional.test_issue28_setup_method_in_generate_tests"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_issue28_setup_method_in_generate_tests(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize('arg1', [1])\n\n class TestClass(object):\n def test_method(self, arg1):\n assert arg1 == self.val\n def setup_method(self, func):\n self.val = 1\n \"\"\"\n )\n result = testdir.runpytest(p, SHOW_PYTEST_WARNINGS_ARG)\n result.assert_outcomes(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_functional2_TestMetafuncFunctional.test_parametrize_functional2.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_functional2_TestMetafuncFunctional.test_parametrize_functional2.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 914, "end_line": 927, "span_ids": ["TestMetafuncFunctional.test_parametrize_functional2"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def 
test_parametrize_functional2(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize(\"arg1\", [1,2])\n metafunc.parametrize(\"arg2\", [4,5])\n def test_hello(arg1, arg2):\n assert 0, (arg1, arg2)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*(1, 4)*\", \"*(1, 5)*\", \"*(2, 4)*\", \"*(2, 5)*\", \"*4 failed*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue_TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 929, "end_line": 951, "span_ids": ["TestMetafuncFunctional.test_parametrize_and_inner_getfixturevalue"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_and_inner_getfixturevalue(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n metafunc.parametrize(\"arg1\", [1], indirect=True)\n metafunc.parametrize(\"arg2\", [10], indirect=True)\n\n import pytest\n @pytest.fixture\n def arg1(request):\n x = request.getfixturevalue(\"arg2\")\n return x + request.param\n\n @pytest.fixture\n def arg2(request):\n return request.param\n\n def test_func1(arg1, arg2):\n assert arg1 == 11\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", p)\n result.stdout.fnmatch_lines([\"*test_func1*1*PASS*\", \"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_on_setup_arg_TestMetafuncFunctional.test_parametrize_on_setup_arg.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_on_setup_arg_TestMetafuncFunctional.test_parametrize_on_setup_arg.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 953, "end_line": 974, "span_ids": ["TestMetafuncFunctional.test_parametrize_on_setup_arg"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_on_setup_arg(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def pytest_generate_tests(metafunc):\n assert \"arg1\" in metafunc.fixturenames\n metafunc.parametrize(\"arg1\", [1], indirect=True)\n\n import pytest\n @pytest.fixture\n def arg1(request):\n return request.param\n\n @pytest.fixture\n def arg2(request, arg1):\n return 10 * arg1\n\n def test_func(arg2):\n assert arg2 == 10\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", p)\n result.stdout.fnmatch_lines([\"*test_func*1*PASS*\", \"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_ids_TestMetafuncFunctional.test_parametrize_with_ids.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_ids_TestMetafuncFunctional.test_parametrize_with_ids.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 976, "end_line": 998, "span_ids": ["TestMetafuncFunctional.test_parametrize_with_ids"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_with_ids(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style=classic\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize((\"a\", \"b\"), [(1,1), (1,2)],\n ids=[\"basic\", \"advanced\"])\n\n def test_function(a, b):\n assert a == b\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n assert result.ret == 1\n result.stdout.fnmatch_lines_random(\n [\"*test_function*basic*PASSED\", \"*test_function*advanced*FAILED\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_without_ids_TestMetafuncFunctional.test_parametrize_without_ids.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_without_ids_TestMetafuncFunctional.test_parametrize_without_ids.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1000, "end_line": 1018, "span_ids": ["TestMetafuncFunctional.test_parametrize_without_ids"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_without_ids(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize((\"a\", \"b\"),\n [(1,object()), (1.3,object())])\n\n def test_function(a, b):\n assert 1\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *test_function*1-b0*\n *test_function*1.3-b1*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_None_in_ids_TestMetafuncFunctional.test_parametrize_with_None_in_ids.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_None_in_ids_TestMetafuncFunctional.test_parametrize_with_None_in_ids.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1020, "end_line": 1040, "span_ids": ["TestMetafuncFunctional.test_parametrize_with_None_in_ids"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_with_None_in_ids(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize((\"a\", \"b\"), [(1,1), (1,1), (1,2)],\n ids=[\"basic\", None, \"advanced\"])\n\n def test_function(a, b):\n assert a == b\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n assert result.ret == 1\n result.stdout.fnmatch_lines_random(\n [\n \"*test_function*basic*PASSED*\",\n \"*test_function*1-1*PASSED*\",\n \"*test_function*advanced*FAILED*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_fixture_parametrized_empty_ids_TestMetafuncFunctional.test_parametrized_empty_ids.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_fixture_parametrized_empty_ids_TestMetafuncFunctional.test_parametrized_empty_ids.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1042, "end_line": 1071, "span_ids": ["TestMetafuncFunctional.test_parametrized_empty_ids", "TestMetafuncFunctional.test_fixture_parametrized_empty_ids"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_fixture_parametrized_empty_ids(self, testdir):\n \"\"\"Fixtures parametrized with empty ids cause an internal error (#1849).\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope=\"module\", ids=[], params=[])\n def temp(request):\n return request.param\n\n def test_temp(temp):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 skipped *\"])\n\n def test_parametrized_empty_ids(self, testdir):\n \"\"\"Tests parametrized with empty ids cause an internal error (#1849).\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize('temp', [], ids=list())\n def test_temp(temp):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 skipped *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrized_ids_invalid_type_TestMetafuncFunctional.test_parametrized_ids_invalid_type.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrized_ids_invalid_type_TestMetafuncFunctional.test_parametrized_ids_invalid_type.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1073, "end_line": 1089, "span_ids": ["TestMetafuncFunctional.test_parametrized_ids_invalid_type"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrized_ids_invalid_type(self, testdir):\n \"\"\"Tests parametrized with ids as non-strings (#1857).\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize(\"x, expected\", [(10, 20), (40, 80)], ids=(None, 2))\n def test_ids_numbers(x,expected):\n assert x * 2 == expected\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*In test_ids_numbers: ids must be list of strings, found: 2 (type: *'int'>)*\"\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names_TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1091, 
"end_line": 1107, "span_ids": ["TestMetafuncFunctional.test_parametrize_with_identical_ids_get_unique_names"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_with_identical_ids_get_unique_names(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n metafunc.parametrize((\"a\", \"b\"), [(1,1), (1,2)],\n ids=[\"a\", \"a\"])\n\n def test_function(a, b):\n assert a == b\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n assert result.ret == 1\n result.stdout.fnmatch_lines_random(\n [\"*test_function*a0*PASSED*\", \"*test_function*a1*FAILED*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_scope_overrides_TestMetafuncFunctional.test_parametrize_scope_overrides.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_scope_overrides_TestMetafuncFunctional.test_parametrize_scope_overrides.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1109, "end_line": 1133, "span_ids": ["TestMetafuncFunctional.test_parametrize_scope_overrides"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n @pytest.mark.parametrize((\"scope\", \"length\"), [(\"module\", 2), (\"function\", 4)])\n def test_parametrize_scope_overrides(self, testdir, scope, length):\n testdir.makepyfile(\n \"\"\"\n import pytest\n values = []\n def pytest_generate_tests(metafunc):\n if \"arg\" in metafunc.funcargnames:\n metafunc.parametrize(\"arg\", [1,2], indirect=True,\n scope=%r)\n @pytest.fixture\n def arg(request):\n values.append(request.param)\n return request.param\n def test_hello(arg):\n assert arg in (1,2)\n def test_world(arg):\n assert arg in (1,2)\n def test_checklength():\n assert len(values) == %d\n \"\"\"\n % (scope, length)\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_issue323_TestMetafuncFunctional.test_usefixtures_seen_in_generate_tests.reprec_assert_outcomes_pa": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_parametrize_issue323_TestMetafuncFunctional.test_usefixtures_seen_in_generate_tests.reprec_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1135, "end_line": 1167, "span_ids": ["TestMetafuncFunctional.test_parametrize_issue323", "TestMetafuncFunctional.test_usefixtures_seen_in_generate_tests"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_parametrize_issue323(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='module', params=range(966))\n def foo(request):\n return request.param\n\n def test_it(foo):\n pass\n def test_it2(foo):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(\"--collect-only\")\n assert not reprec.getcalls(\"pytest_internalerror\")\n\n def test_usefixtures_seen_in_generate_tests(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n assert \"abc\" in metafunc.fixturenames\n metafunc.parametrize(\"abc\", [1])\n\n @pytest.mark.usefixtures(\"abc\")\n def test_function():\n pass\n \"\"\"\n )\n reprec = testdir.runpytest()\n reprec.assert_outcomes(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir_TestMetafuncFunctional.test_generate_tests_only_done_in_subdir.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1169, "end_line": 1191, "span_ids": ["TestMetafuncFunctional.test_generate_tests_only_done_in_subdir"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_generate_tests_only_done_in_subdir(self, testdir):\n sub1 = testdir.mkpydir(\"sub1\")\n sub2 = testdir.mkpydir(\"sub2\")\n sub1.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_generate_tests(metafunc):\n assert metafunc.function.__name__ == \"test_1\"\n \"\"\"\n )\n )\n sub2.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_generate_tests(metafunc):\n assert metafunc.function.__name__ == \"test_2\"\n \"\"\"\n )\n )\n sub1.join(\"test_in_sub1.py\").write(\"def test_1(): pass\")\n 
sub2.join(\"test_in_sub2.py\").write(\"def test_2(): pass\")\n result = testdir.runpytest(\"--keep-duplicates\", \"-v\", \"-s\", sub1, sub2, sub1)\n result.assert_outcomes(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_same_function_names_issue403_TestMetafuncFunctional.test_parametrize_misspelling.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctional.test_generate_same_function_names_issue403_TestMetafuncFunctional.test_parametrize_misspelling.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1186, "end_line": 1224, "span_ids": ["TestMetafuncFunctional.test_parametrize_misspelling", "TestMetafuncFunctional.test_generate_same_function_names_issue403"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctional(object):\n\n def test_generate_same_function_names_issue403(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def make_tests():\n @pytest.mark.parametrize(\"x\", range(2))\n def test_foo(x):\n pass\n return test_foo\n\n test_x = make_tests()\n test_y = make_tests()\n \"\"\"\n )\n reprec = testdir.runpytest()\n reprec.assert_outcomes(passed=4)\n\n @pytest.mark.issue(463)\n @pytest.mark.parametrize(\"attr\", [\"parametrise\", \"parameterize\", \"parameterise\"])\n def test_parametrize_misspelling(self, testdir, attr):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.{}(\"x\", range(2))\n def test_foo(x):\n pass\n \"\"\".format(\n attr\n )\n )\n result = testdir.runpytest(\"--collectonly\")\n result.stdout.fnmatch_lines(\n [\n \"test_foo has '{}' mark, spelling should be 'parametrize'\".format(attr),\n \"*1 error in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto_TestMetafuncFunctionalAuto.test_parametrize_auto_scope.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto_TestMetafuncFunctionalAuto.test_parametrize_auto_scope.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1234, "end_line": 1259, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_auto_scope", "TestMetafuncFunctionalAuto"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n \"\"\"\n Tests related to automatically find out the correct scope for parametrized tests (#1832).\n \"\"\"\n\n def test_parametrize_auto_scope(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session', autouse=True)\n def fixture():\n return 1\n\n @pytest.mark.parametrize('animal', [\"dog\", \"cat\"])\n def test_1(animal):\n assert animal in ('dog', 'cat')\n\n @pytest.mark.parametrize('animal', ['fish'])\n def test_2(animal):\n assert animal == 'fish'\n\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 3 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1261, "end_line": 1282, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_auto_scope_indirect"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n\n def test_parametrize_auto_scope_indirect(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session')\n def echo(request):\n return request.param\n\n @pytest.mark.parametrize('animal, echo', [(\"dog\", 1), (\"cat\", 2)], indirect=['echo'])\n def test_1(animal, echo):\n assert animal in ('dog', 'cat')\n assert echo in (1, 2, 3)\n\n @pytest.mark.parametrize('animal, echo', [('fish', 3)], indirect=['echo'])\n def test_2(animal, echo):\n assert animal == 'fish'\n assert echo in (1, 2, 3)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 3 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture_TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1284, 
"end_line": 1299, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_auto_scope_override_fixture"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n\n def test_parametrize_auto_scope_override_fixture(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='session', autouse=True)\n def animal():\n return 'fox'\n\n @pytest.mark.parametrize('animal', [\"dog\", \"cat\"])\n def test_1(animal):\n assert animal in ('dog', 'cat')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 2 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_all_indirects_TestMetafuncFunctionalAuto.test_parametrize_all_indirects.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_all_indirects_TestMetafuncFunctionalAuto.test_parametrize_all_indirects.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1301, "end_line": 1326, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_all_indirects"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n\n def test_parametrize_all_indirects(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture()\n def animal(request):\n return request.param\n\n @pytest.fixture(scope='session')\n def echo(request):\n return request.param\n\n @pytest.mark.parametrize('animal, echo', [(\"dog\", 1), (\"cat\", 2)], indirect=True)\n def test_1(animal, echo):\n assert animal in ('dog', 'cat')\n assert echo in (1, 2, 3)\n\n @pytest.mark.parametrize('animal, echo', [(\"fish\", 3)], indirect=True)\n def test_2(animal, echo):\n assert animal == 'fish'\n assert echo in (1, 2, 3)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 3 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope.assert_class_fix_setup_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope_TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope.assert_class_fix_setup_", 
"embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1328, "end_line": 1359, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_some_arguments_auto_scope"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n\n def test_parametrize_some_arguments_auto_scope(self, testdir, monkeypatch):\n \"\"\"Integration test for (#3941)\"\"\"\n class_fix_setup = []\n monkeypatch.setattr(sys, \"class_fix_setup\", class_fix_setup, raising=False)\n func_fix_setup = []\n monkeypatch.setattr(sys, \"func_fix_setup\", func_fix_setup, raising=False)\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n import sys\n\n @pytest.fixture(scope='class', autouse=True)\n def class_fix(request):\n sys.class_fix_setup.append(request.param)\n\n @pytest.fixture(autouse=True)\n def func_fix():\n sys.func_fix_setup.append(True)\n\n @pytest.mark.parametrize('class_fix', [10, 20], indirect=True)\n class Test:\n def test_foo(self):\n pass\n def test_bar(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest_inprocess()\n result.stdout.fnmatch_lines([\"* 4 passed in *\"])\n assert func_fix_setup == [True] * 4\n assert class_fix_setup == [10, 20]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_issue634_TestMetafuncFunctionalAuto.test_parametrize_issue634.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMetafuncFunctionalAuto.test_parametrize_issue634_TestMetafuncFunctionalAuto.test_parametrize_issue634.None_1", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1361, "end_line": 1393, "span_ids": ["TestMetafuncFunctionalAuto.test_parametrize_issue634"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMetafuncFunctionalAuto(object):\n\n def test_parametrize_issue634(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(scope='module')\n def foo(request):\n print('preparing foo-%d' % request.param)\n return 'foo-%d' % request.param\n\n def test_one(foo):\n pass\n\n def test_two(foo):\n pass\n\n test_two.test_with = (2, 3)\n\n def pytest_generate_tests(metafunc):\n params = (1, 2, 3, 4)\n if not 'foo' in metafunc.fixturenames:\n return\n\n test_with = getattr(metafunc.function, 'test_with', None)\n if test_with:\n params = test_with\n metafunc.parametrize('foo', params, indirect=True)\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n output = result.stdout.str()\n assert output.count(\"preparing foo-2\") == 1\n assert 
output.count(\"preparing foo-3\") == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization_TestMarkersWithParametrization.test_simple_mark.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization_TestMarkersWithParametrization.test_simple_mark.None_3", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1389, "end_line": 1410, "span_ids": ["TestMarkersWithParametrization.test_simple_mark", "TestMarkersWithParametrization"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n def test_simple_mark(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.foo\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.bar),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n items = testdir.getitems(s)\n assert len(items) == 3\n for item in items:\n assert \"foo\" in item.keywords\n assert \"bar\" not in items[0].keywords\n assert \"bar\" in items[1].keywords\n assert \"bar\" not in items[2].keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_select_based_on_mark_TestMarkersWithParametrization.test_select_based_on_mark.assert_len_fail_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_select_based_on_mark_TestMarkersWithParametrization.test_select_based_on_mark.assert_len_fail_0", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1412, "end_line": 1429, "span_ids": ["TestMarkersWithParametrization.test_select_based_on_mark"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_select_based_on_mark(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(2, 3, marks=pytest.mark.foo),\n (3, 4),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n rec = testdir.inline_run(\"-m\", \"foo\", SHOW_PYTEST_WARNINGS_ARG)\n passed, skipped, fail = rec.listoutcomes()\n assert len(passed) == 1\n 
assert len(skipped) == 0\n assert len(fail) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_nested_marks_TestMarkersWithParametrization.test_nested_marks.for_mark_in_foo_bar_.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_nested_marks_TestMarkersWithParametrization.test_nested_marks.for_mark_in_foo_bar_.None_2", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1431, "end_line": 1450, "span_ids": ["TestMarkersWithParametrization.test_nested_marks"], "tokens": 175}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n @pytest.mark.xfail(reason=\"is this important to support??\")\n def test_nested_marks(self, testdir):\n s = \"\"\"\n import pytest\n mastermark = pytest.mark.foo(pytest.mark.bar)\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n mastermark((1, 3)),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n items = testdir.getitems(s)\n assert len(items) == 3\n for mark in [\"foo\", \"bar\"]:\n assert mark not in items[0].keywords\n assert mark in items[1].keywords\n assert mark not in items[2].keywords", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_TestMarkersWithParametrization.test_simple_xfail.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_TestMarkersWithParametrization.test_simple_xfail.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1452, "end_line": 1467, "span_ids": ["TestMarkersWithParametrization.test_simple_xfail"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_simple_xfail(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n reprec = 
testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n # xfail is skip??\n reprec.assertoutcome(passed=2, skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_single_argname_TestMarkersWithParametrization.test_simple_xfail_single_argname.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_simple_xfail_single_argname_TestMarkersWithParametrization.test_simple_xfail_single_argname.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1469, "end_line": 1483, "span_ids": ["TestMarkersWithParametrization.test_simple_xfail_single_argname"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_simple_xfail_single_argname(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize(\"n\", [\n 2,\n pytest.param(3, marks=pytest.mark.xfail),\n 4,\n ])\n def test_isEven(n):\n assert n % 2 == 0\n \"\"\"\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=2, skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_TestMarkersWithParametrization.test_xfail_with_arg.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_TestMarkersWithParametrization.test_xfail_with_arg.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1485, "end_line": 1499, "span_ids": ["TestMarkersWithParametrization.test_xfail_with_arg"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_xfail_with_arg(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail(\"True\")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=2, 
skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_kwarg_TestMarkersWithParametrization.test_xfail_with_kwarg.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_kwarg_TestMarkersWithParametrization.test_xfail_with_kwarg.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1501, "end_line": 1515, "span_ids": ["TestMarkersWithParametrization.test_xfail_with_kwarg"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_xfail_with_kwarg(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail(reason=\"some bug\")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=2, skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg_TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1517, "end_line": 1531, "span_ids": ["TestMarkersWithParametrization.test_xfail_with_arg_and_kwarg"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_xfail_with_arg_and_kwarg(self, testdir):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(1, 3, marks=pytest.mark.xfail(\"True\", reason=\"some bug\")),\n (2, 3),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=2, skipped=1)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_passing_is_xpass_TestMarkersWithParametrization.test_xfail_passing_is_xpass.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_xfail_passing_is_xpass_TestMarkersWithParametrization.test_xfail_passing_is_xpass.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1533, "end_line": 1553, "span_ids": ["TestMarkersWithParametrization.test_xfail_passing_is_xpass"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_xfail_passing_is_xpass(self, testdir, strict):\n s = \"\"\"\n import pytest\n\n m = pytest.mark.xfail(\"sys.version_info > (0, 0, 0)\", reason=\"some bug\", strict={strict})\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n (1, 2),\n pytest.param(2, 3, marks=m),\n (3, 4),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\".format(\n strict=strict\n )\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n passed, failed = (2, 1) if strict else (3, 0)\n reprec.assertoutcome(passed=passed, failed=failed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_called_in_generate_tests_TestMarkersWithParametrization.test_parametrize_ID_generation_string_int_works.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_called_in_generate_tests_TestMarkersWithParametrization.test_parametrize_ID_generation_string_int_works.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1555, "end_line": 1594, "span_ids": ["TestMarkersWithParametrization.test_parametrize_ID_generation_string_int_works", "TestMarkersWithParametrization.test_parametrize_called_in_generate_tests"], "tokens": 276}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_parametrize_called_in_generate_tests(self, 
testdir):\n s = \"\"\"\n import pytest\n\n\n def pytest_generate_tests(metafunc):\n passingTestData = [(1, 2),\n (2, 3)]\n failingTestData = [(1, 3),\n (2, 2)]\n\n testData = passingTestData + [pytest.param(*d, marks=pytest.mark.xfail)\n for d in failingTestData]\n metafunc.parametrize((\"n\", \"expected\"), testData)\n\n\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\"\n testdir.makepyfile(s)\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=2, skipped=2)\n\n @pytest.mark.issue(290)\n def test_parametrize_ID_generation_string_int_works(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def myfixture():\n return 'example'\n @pytest.mark.parametrize(\n 'limit', (0, '0'))\n def test_limit(limit, myfixture):\n return\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_marked_value_TestMarkersWithParametrization.test_parametrize_marked_value.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_parametrize_marked_value_TestMarkersWithParametrization.test_parametrize_marked_value.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1596, "end_line": 1619, "span_ids": ["TestMarkersWithParametrization.test_parametrize_marked_value"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_parametrize_marked_value(self, testdir, strict):\n s = \"\"\"\n import pytest\n\n @pytest.mark.parametrize((\"n\", \"expected\"), [\n pytest.param(\n 2,3,\n marks=pytest.mark.xfail(\"sys.version_info > (0, 0, 0)\", reason=\"some bug\", strict={strict}),\n ),\n pytest.param(\n 2,3,\n marks=[pytest.mark.xfail(\"sys.version_info > (0, 0, 0)\", reason=\"some bug\", strict={strict})],\n ),\n ])\n def test_increment(n, expected):\n assert n + 1 == expected\n \"\"\".format(\n strict=strict\n )\n testdir.makepyfile(s)\n reprec = testdir.inline_run()\n passed, failed = (0, 2) if strict else (2, 0)\n reprec.assertoutcome(passed=passed, failed=failed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_TestMarkersWithParametrization.test_pytest_make_parametrize_id.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_TestMarkersWithParametrization.test_pytest_make_parametrize_id.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1621, "end_line": 1638, "span_ids": ["TestMarkersWithParametrization.test_pytest_make_parametrize_id"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_pytest_make_parametrize_id(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_make_parametrize_id(config, val):\n return str(val * 2)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize(\"x\", range(2))\n def test_func(x):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines([\"*test_func*0*PASS*\", \"*test_func*2*PASS*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_with_argname_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/metafunc.py_TestMarkersWithParametrization.test_pytest_make_parametrize_id_with_argname_", "embedding": null, "metadata": {"file_path": "testing/python/metafunc.py", "file_name": "metafunc.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1640, "end_line": 1664, "span_ids": ["TestMarkersWithParametrization.test_pytest_make_parametrize_id_with_argname"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(308)\nclass TestMarkersWithParametrization(object):\n\n def test_pytest_make_parametrize_id_with_argname(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_make_parametrize_id(config, val, argname):\n return str(val * 2 if argname == 'x' else val * 10)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize(\"x\", range(2))\n def test_func_a(x):\n pass\n\n @pytest.mark.parametrize(\"y\", [1])\n def test_func_b(y):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n [\"*test_func_a*0*PASS*\", \"*test_func_a*2*PASS*\", \"*test_func_b*10*PASS*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_sys_TestRaises.test_raises_repr_inflight.with_pytest_raises_E_as_.raise_E_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_sys_TestRaises.test_raises_repr_inflight.with_pytest_raises_E_as_.raise_E_", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 67, "span_ids": ["TestRaises.test_raises_exec", "TestRaises.test_raises_falsey_type_error", "TestRaises.test_raises_repr_inflight.E", "TestRaises.test_raises_callable_no_exception.A", "TestRaises.test_raises_function", "TestRaises.test_raises_exec_correct_filename", "TestRaises", "TestRaises.test_raises_repr_inflight.E:2", "TestRaises.test_raises_repr_inflight", "TestRaises.test_raises_callable_no_exception.A.__call__", "imports", "TestRaises.test_raises", "TestRaises.test_raises_syntax_error", "TestRaises.test_raises_callable_no_exception"], "tokens": 447}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport six\n\nimport pytest\nfrom _pytest.outcomes import Failed\nfrom _pytest.warning_types import PytestDeprecationWarning\n\n\nclass TestRaises(object):\n def test_raises(self):\n source = \"int('qwe')\"\n with pytest.warns(PytestDeprecationWarning):\n excinfo = pytest.raises(ValueError, source)\n code = excinfo.traceback[-1].frame.code\n s = str(code.fullsource)\n assert s == source\n\n def test_raises_exec(self):\n with pytest.warns(PytestDeprecationWarning) as warninfo:\n pytest.raises(ValueError, \"a,x = []\")\n assert warninfo[0].filename == __file__\n\n def test_raises_exec_correct_filename(self):\n with pytest.warns(PytestDeprecationWarning):\n excinfo = pytest.raises(ValueError, 'int(\"s\")')\n assert __file__ in excinfo.traceback[-1].path\n\n def test_raises_syntax_error(self):\n with pytest.warns(PytestDeprecationWarning) as warninfo:\n pytest.raises(SyntaxError, \"qwe qwe qwe\")\n assert warninfo[0].filename == __file__\n\n def test_raises_function(self):\n pytest.raises(ValueError, int, \"hello\")\n\n def test_raises_callable_no_exception(self):\n class A(object):\n def __call__(self):\n pass\n\n try:\n pytest.raises(ValueError, A())\n except pytest.raises.Exception:\n pass\n\n def test_raises_falsey_type_error(self):\n with pytest.raises(TypeError):\n with pytest.raises(AssertionError, match=0):\n raise AssertionError(\"ohai\")\n\n def test_raises_repr_inflight(self):\n \"\"\"Ensure repr() on an exception info inside a pytest.raises with block works (#4386)\"\"\"\n\n class E(Exception):\n pass\n\n with pytest.raises(E) as excinfo:\n # this test prints the inflight uninitialized object\n # using repr and str as well as pprint to demonstrate\n # it works\n print(str(excinfo))\n print(repr(excinfo))\n import pprint\n\n pprint.pprint(excinfo)\n raise E()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_as_contextmanager_TestRaises.test_raises_as_contextmanager.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_as_contextmanager_TestRaises.test_raises_as_contextmanager.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 69, "end_line": 95, "span_ids": ["TestRaises.test_raises_as_contextmanager"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_raises_as_contextmanager(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n import _pytest._code\n\n def test_simple():\n with pytest.raises(ZeroDivisionError) as excinfo:\n assert isinstance(excinfo, _pytest._code.ExceptionInfo)\n 1/0\n print(excinfo)\n assert excinfo.type == ZeroDivisionError\n assert isinstance(excinfo.value, ZeroDivisionError)\n\n def test_noraise():\n with pytest.raises(pytest.raises.Exception):\n with pytest.raises(ValueError):\n int()\n\n def test_raise_wrong_exception_passes_by():\n with pytest.raises(ZeroDivisionError):\n with pytest.raises(ValueError):\n 1/0\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_TestRaises.test_does_not_raise.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_TestRaises.test_does_not_raise.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 97, "end_line": 120, "span_ids": ["TestRaises.test_does_not_raise"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_does_not_raise(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from contextlib import contextmanager\n import pytest\n\n @contextmanager\n def does_not_raise():\n yield\n\n @pytest.mark.parametrize('example_input,expectation', [\n (3, does_not_raise()),\n (2, does_not_raise()),\n (1, does_not_raise()),\n (0, pytest.raises(ZeroDivisionError)),\n ])\n def test_division(example_input, expectation):\n '''Test how much I know division.'''\n with expectation:\n assert (6 / example_input) is not None\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*4 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_does_raise_TestRaises.test_does_not_raise_does_raise.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_does_not_raise_does_raise_TestRaises.test_does_not_raise_does_raise.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 122, "end_line": 143, "span_ids": ["TestRaises.test_does_not_raise_does_raise"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_does_not_raise_does_raise(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from contextlib import contextmanager\n import pytest\n\n @contextmanager\n def does_not_raise():\n yield\n\n @pytest.mark.parametrize('example_input,expectation', [\n (0, does_not_raise()),\n (1, pytest.raises(ZeroDivisionError)),\n ])\n def test_division(example_input, expectation):\n '''Test how much I know division.'''\n with expectation:\n assert (6 / example_input) is not None\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*2 failed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_noclass_TestRaises.test_custom_raise_message.try_.else_.assert_False_Expected_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_noclass_TestRaises.test_custom_raise_message.try_.else_.assert_False_Expected_p", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 145, "end_line": 183, "span_ids": ["TestRaises.test_tuple", "TestRaises.test_invalid_arguments_to_raises", "TestRaises.test_custom_raise_message", "TestRaises.test_noclass", "TestRaises.test_no_raise_message"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_noclass(self):\n with pytest.raises(TypeError):\n pytest.raises(\"wrong\", lambda: None)\n\n def test_invalid_arguments_to_raises(self):\n with pytest.raises(TypeError, match=\"unknown\"):\n with pytest.raises(TypeError, unknown=\"bogus\"):\n raise ValueError()\n\n def test_tuple(self):\n with pytest.raises((KeyError, ValueError)):\n raise KeyError(\"oops\")\n\n def test_no_raise_message(self):\n try:\n pytest.raises(ValueError, int, \"0\")\n except pytest.raises.Exception as e:\n assert e.msg == \"DID NOT RAISE {}\".format(repr(ValueError))\n else:\n assert False, \"Expected pytest.raises.Exception\"\n\n try:\n with pytest.raises(ValueError):\n pass\n except 
pytest.raises.Exception as e:\n assert e.msg == \"DID NOT RAISE {}\".format(repr(ValueError))\n else:\n assert False, \"Expected pytest.raises.Exception\"\n\n def test_custom_raise_message(self):\n message = \"TEST_MESSAGE\"\n try:\n with pytest.warns(PytestDeprecationWarning):\n with pytest.raises(ValueError, message=message):\n pass\n except pytest.raises.Exception as e:\n assert e.msg == message\n else:\n assert False, \"Expected pytest.raises.Exception\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_cyclic_reference_TestRaises.test_raises_cyclic_reference.for_o_in_gc_get_objects_.assert_type_o_is_not_T": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_cyclic_reference_TestRaises.test_raises_cyclic_reference.for_o_in_gc_get_objects_.assert_type_o_is_not_T", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 185, "end_line": 210, "span_ids": ["TestRaises.test_raises_cyclic_reference.T.__call__", "TestRaises.test_raises_cyclic_reference.T", "TestRaises.test_raises_cyclic_reference"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n @pytest.mark.parametrize(\"method\", [\"function\", \"with\"])\n def test_raises_cyclic_reference(self, method):\n \"\"\"\n Ensure pytest.raises does not leave a reference cycle (#1965).\n \"\"\"\n import gc\n\n class T(object):\n def __call__(self):\n raise ValueError\n\n t = T()\n if method == \"function\":\n pytest.raises(ValueError, t)\n else:\n with pytest.raises(ValueError):\n t()\n\n # ensure both forms of pytest.raises don't leave exceptions in sys.exc_info()\n assert sys.exc_info() == (None, None, None)\n\n del t\n\n # ensure the t instance is not stuck in a cyclic reference\n for o in gc.get_objects():\n assert type(o) is not T", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_TestRaises.test_raises_match.with_pytest_raises_Assert.with_pytest_raises_ValueE.int_asdf_base_10_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_TestRaises.test_raises_match.with_pytest_raises_Assert.with_pytest_raises_ValueE.int_asdf_base_10_", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 212, "end_line": 227, "span_ids": ["TestRaises.test_raises_match"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_raises_match(self):\n msg = r\"with base \\d+\"\n with pytest.raises(ValueError, match=msg):\n int(\"asdf\")\n\n msg = \"with base 10\"\n with pytest.raises(ValueError, match=msg):\n int(\"asdf\")\n\n msg = \"with base 16\"\n expr = r\"Pattern '{}' not found in 'invalid literal for int\\(\\) with base 10: 'asdf''\".format(\n msg\n )\n with pytest.raises(AssertionError, match=expr):\n with pytest.raises(ValueError, match=msg):\n int(\"asdf\", base=10)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_wrong_type_TestRaises.test_raises_exception_looks_iterable.with_pytest_raises_.pytest_raises_ClassLooksI": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_match_wrong_type_TestRaises.test_raises_exception_looks_iterable.with_pytest_raises_.pytest_raises_ClassLooksI", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 229, "end_line": 257, "span_ids": ["TestRaises.test_raises_exception_looks_iterable.Meta", "TestRaises.test_raises_exception_looks_iterable.ClassLooksIterableException:2", "TestRaises.test_raises_exception_looks_iterable.ClassLooksIterableException", "TestRaises.test_raises_match_wrong_type", "TestRaises.test_raises_exception_looks_iterable.Meta.__getitem__", "TestRaises.test_raises_exception_looks_iterable"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_raises_match_wrong_type(self):\n \"\"\"Raising an exception with the wrong type and match= given.\n\n pytest should throw the unexpected exception - the pattern match is not\n really relevant if we got a different exception.\n \"\"\"\n with pytest.raises(ValueError):\n with pytest.raises(IndexError, match=\"nomatch\"):\n int(\"asdf\")\n\n def test_raises_exception_looks_iterable(self):\n from six import add_metaclass\n\n class Meta(type(object)):\n def __getitem__(self, item):\n return 1 / 0\n\n def __len__(self):\n return 1\n\n @add_metaclass(Meta)\n class ClassLooksIterableException(Exception):\n pass\n\n with pytest.raises(\n Failed,\n match=r\"DID NOT RAISE \",\n ):\n pytest.raises(ClassLooksIterableException, lambda: None)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_with_raising_dunder_class_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/raises.py_TestRaises.test_raises_with_raising_dunder_class_", "embedding": null, "metadata": {"file_path": "testing/python/raises.py", "file_name": "raises.py", "file_type": "text/x-python", "category": "implementation", "start_line": 259, "end_line": 280, "span_ids": 
["TestRaises.test_raises_with_raising_dunder_class.CrappyClass.__class__", "TestRaises.test_raises_with_raising_dunder_class", "TestRaises.test_raises_with_raising_dunder_class.CrappyClass"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRaises(object):\n\n def test_raises_with_raising_dunder_class(self):\n \"\"\"Test current behavior with regard to exceptions via __class__ (#4284).\"\"\"\n\n class CrappyClass(Exception):\n @property\n def __class__(self):\n assert False, \"via __class__\"\n\n if six.PY2:\n with pytest.raises(pytest.fail.Exception) as excinfo:\n with pytest.raises(CrappyClass()):\n pass\n assert \"DID NOT RAISE\" in excinfo.value.args[0]\n\n with pytest.raises(CrappyClass) as excinfo:\n raise CrappyClass()\n else:\n with pytest.raises(AssertionError) as excinfo:\n with pytest.raises(CrappyClass()):\n pass\n assert \"via __class__\" in excinfo.value.args[0]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_pytest_test_show_only_active_fixtures.assert__arg0_not_in_res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_pytest_test_show_only_active_fixtures.assert__arg0_not_in_res", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["test_show_only_active_fixtures", "imports", "mode"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture(params=[\"--setup-only\", \"--setup-plan\", \"--setup-show\"], scope=\"module\")\ndef mode(request):\n return request.param\n\n\ndef test_show_only_active_fixtures(testdir, mode):\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def _arg0():\n \"\"\"hidden arg0 fixture\"\"\"\n @pytest.fixture\n def arg1():\n \"\"\"arg1 docstring\"\"\"\n def test_arg1(arg1):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\"*SETUP F arg1*\", \"*test_arg1 (fixtures used: arg1)*\", \"*TEARDOWN F arg1*\"]\n )\n assert \"_arg0\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_different_scopes_test_show_different_scopes.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_different_scopes_test_show_different_scopes.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": 
"setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 33, "end_line": 59, "span_ids": ["test_show_different_scopes"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_different_scopes(testdir, mode):\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def arg_function():\n \"\"\"function scoped fixture\"\"\"\n @pytest.fixture(scope='session')\n def arg_session():\n \"\"\"session scoped fixture\"\"\"\n def test_arg1(arg_session, arg_function):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"SETUP S arg_session*\",\n \"*SETUP F arg_function*\",\n \"*test_arg1 (fixtures used: arg_function, arg_session)*\",\n \"*TEARDOWN F arg_function*\",\n \"TEARDOWN S arg_session*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_nested_fixtures_test_show_nested_fixtures.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_nested_fixtures_test_show_nested_fixtures.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 62, "end_line": 93, "span_ids": ["test_show_nested_fixtures"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_nested_fixtures(testdir, mode):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture(scope='session')\n def arg_same():\n \"\"\"session scoped fixture\"\"\"\n '''\n )\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture(scope='function')\n def arg_same(arg_same):\n \"\"\"function scoped fixture\"\"\"\n def test_arg1(arg_same):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"SETUP S arg_same*\",\n \"*SETUP F arg_same (fixtures used: arg_same)*\",\n \"*test_arg1 (fixtures used: arg_same)*\",\n \"*TEARDOWN F arg_same*\",\n \"TEARDOWN S arg_same*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_autouse_test_show_fixtures_with_autouse.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_autouse_test_show_fixtures_with_autouse.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": 
"text/x-python", "category": "implementation", "start_line": 96, "end_line": 120, "span_ids": ["test_show_fixtures_with_autouse"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_with_autouse(testdir, mode):\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def arg_function():\n \"\"\"function scoped fixture\"\"\"\n @pytest.fixture(scope='session', autouse=True)\n def arg_session():\n \"\"\"session scoped fixture\"\"\"\n def test_arg1(arg_function):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"SETUP S arg_session*\",\n \"*SETUP F arg_function*\",\n \"*test_arg1 (fixtures used: arg_function, arg_session)*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameters_test_show_fixtures_with_parameters.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameters_test_show_fixtures_with_parameters.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 123, "end_line": 153, "span_ids": ["test_show_fixtures_with_parameters"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_with_parameters(testdir, mode):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture(scope='session', params=['foo', 'bar'])\n def arg_same():\n \"\"\"session scoped fixture\"\"\"\n '''\n )\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture(scope='function')\n def arg_other(arg_same):\n \"\"\"function scoped fixture\"\"\"\n def test_arg1(arg_other):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"SETUP S arg_same?foo?\",\n \"TEARDOWN S arg_same?foo?\",\n \"SETUP S arg_same?bar?\",\n \"TEARDOWN S arg_same?bar?\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_test_show_fixtures_with_parameter_ids.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_test_show_fixtures_with_parameter_ids.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", 
"start_line": 156, "end_line": 182, "span_ids": ["test_show_fixtures_with_parameter_ids"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_with_parameter_ids(testdir, mode):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture(\n scope='session', params=['foo', 'bar'], ids=['spam', 'ham'])\n def arg_same():\n \"\"\"session scoped fixture\"\"\"\n '''\n )\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture(scope='function')\n def arg_other(arg_same):\n \"\"\"function scoped fixture\"\"\"\n def test_arg1(arg_other):\n pass\n '''\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\"SETUP S arg_same?spam?\", \"SETUP S arg_same?ham?\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_function_test_show_fixtures_with_parameter_ids_function.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_with_parameter_ids_function_test_show_fixtures_with_parameter_ids_function.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 185, "end_line": 200, "span_ids": ["test_show_fixtures_with_parameter_ids_function"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_with_parameter_ids_function(testdir, mode):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper())\n def foobar():\n pass\n def test_foobar(foobar):\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(mode, p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines([\"*SETUP F foobar?FOO?\", \"*SETUP F foobar?BAR?\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_dynamic_fixture_request_test_dynamic_fixture_request.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_dynamic_fixture_request_test_dynamic_fixture_request.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 203, "end_line": 226, "span_ids": ["test_dynamic_fixture_request"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dynamic_fixture_request(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture()\n def dynamically_requested_fixture():\n pass\n @pytest.fixture()\n def dependent_fixture(request):\n request.getfixturevalue('dynamically_requested_fixture')\n def test_dyn(dependent_fixture):\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(\"--setup-only\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"*SETUP F dynamically_requested_fixture\",\n \"*TEARDOWN F dynamically_requested_fixture\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_capturing_test_capturing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_capturing_test_capturing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 229, "end_line": 248, "span_ids": ["test_capturing"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_capturing(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest, sys\n @pytest.fixture()\n def one():\n sys.stdout.write('this should be captured')\n sys.stderr.write('this should also be captured')\n @pytest.fixture()\n def two(one):\n assert 0\n def test_capturing(two):\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(\"--setup-only\", p)\n result.stdout.fnmatch_lines(\n [\"this should be captured\", \"this should also be captured\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_and_execute_test_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_only.py_test_show_fixtures_and_execute_test_", "embedding": null, "metadata": {"file_path": "testing/python/setup_only.py", "file_name": "setup_only.py", "file_type": "text/x-python", "category": "implementation", "start_line": 251, "end_line": 270, "span_ids": ["test_show_fixtures_and_execute_test"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_and_execute_test(testdir):\n \"\"\" Verifies that setups are shown and tests are executed. 
\"\"\"\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg():\n assert True\n def test_arg(arg):\n assert False\n \"\"\"\n )\n\n result = testdir.runpytest(\"--setup-show\", p)\n assert result.ret == 1\n\n result.stdout.fnmatch_lines(\n [\"*SETUP F arg*\", \"*test_arg (fixtures used: arg)F*\", \"*TEARDOWN F arg*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_plan.py_test_show_fixtures_and_test_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/setup_plan.py_test_show_fixtures_and_test_", "embedding": null, "metadata": {"file_path": "testing/python/setup_plan.py", "file_name": "setup_plan.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 20, "span_ids": ["test_show_fixtures_and_test"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_show_fixtures_and_test(testdir):\n \"\"\" Verifies that fixtures are not executed. \"\"\"\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg():\n assert False\n def test_arg(arg):\n assert False\n \"\"\"\n )\n\n result = testdir.runpytest(\"--setup-plan\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\"*SETUP F arg*\", \"*test_arg (fixtures used: arg)\", \"*TEARDOWN F arg*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py__coding_utf_8__test_fixtures_in_module.assert__arg0_not_in_res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py__coding_utf_8__test_fixtures_in_module.assert__arg0_not_in_res", "embedding": null, "metadata": {"file_path": "testing/python/show_fixtures_per_test.py", "file_name": "show_fixtures_per_test.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 36, "span_ids": ["docstring", "test_no_items_should_not_show_output", "test_fixtures_in_module"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\n\n\ndef test_no_items_should_not_show_output(testdir):\n result = testdir.runpytest(\"--fixtures-per-test\")\n assert \"fixtures used by\" not in result.stdout.str()\n assert result.ret == 0\n\n\ndef test_fixtures_in_module(testdir):\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def _arg0():\n \"\"\"hidden arg0 fixture\"\"\"\n @pytest.fixture\n def arg1():\n \"\"\"arg1 docstring\"\"\"\n def test_arg1(arg1):\n pass\n '''\n )\n\n result = testdir.runpytest(\"--fixtures-per-test\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"*fixtures used 
by test_arg1*\",\n \"*(test_fixtures_in_module.py:9)*\",\n \"arg1\",\n \" arg1 docstring\",\n ]\n )\n assert \"_arg0\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_fixtures_in_conftest_test_fixtures_in_conftest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_fixtures_in_conftest_test_fixtures_in_conftest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/show_fixtures_per_test.py", "file_name": "show_fixtures_per_test.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 83, "span_ids": ["test_fixtures_in_conftest"], "tokens": 267}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixtures_in_conftest(testdir):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"arg1 docstring\"\"\"\n @pytest.fixture\n def arg2():\n \"\"\"arg2 docstring\"\"\"\n @pytest.fixture\n def arg3(arg1, arg2):\n \"\"\"arg3\n docstring\n \"\"\"\n '''\n )\n p = testdir.makepyfile(\n \"\"\"\n def test_arg2(arg2):\n pass\n def test_arg3(arg3):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--fixtures-per-test\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"*fixtures used by test_arg2*\",\n \"*(test_fixtures_in_conftest.py:2)*\",\n \"arg2\",\n \" arg2 docstring\",\n \"*fixtures used by test_arg3*\",\n \"*(test_fixtures_in_conftest.py:4)*\",\n \"arg1\",\n \" arg1 docstring\",\n \"arg2\",\n \" arg2 docstring\",\n \"arg3\",\n \" arg3\",\n \" docstring\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_should_show_fixtures_used_by_test_test_should_show_fixtures_used_by_test.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_should_show_fixtures_used_by_test_test_should_show_fixtures_used_by_test.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/show_fixtures_per_test.py", "file_name": "show_fixtures_per_test.py", "file_type": "text/x-python", "category": "test", "start_line": 86, "end_line": 120, "span_ids": ["test_should_show_fixtures_used_by_test"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_should_show_fixtures_used_by_test(testdir):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"arg1 from conftest\"\"\"\n @pytest.fixture\n def arg2():\n \"\"\"arg2 from conftest\"\"\"\n '''\n )\n p 
= testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def arg1():\n \"\"\"arg1 from testmodule\"\"\"\n def test_args(arg1, arg2):\n pass\n '''\n )\n result = testdir.runpytest(\"--fixtures-per-test\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"*fixtures used by test_args*\",\n \"*(test_should_show_fixtures_used_by_test.py:6)*\",\n \"arg1\",\n \" arg1 from testmodule\",\n \"arg2\",\n \" arg2 from conftest\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_verbose_include_private_fixtures_and_loc_test_verbose_include_private_fixtures_and_loc.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_verbose_include_private_fixtures_and_loc_test_verbose_include_private_fixtures_and_loc.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/python/show_fixtures_per_test.py", "file_name": "show_fixtures_per_test.py", "file_type": "text/x-python", "category": "test", "start_line": 123, "end_line": 159, "span_ids": ["test_verbose_include_private_fixtures_and_loc"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_verbose_include_private_fixtures_and_loc(testdir):\n testdir.makeconftest(\n '''\n import pytest\n @pytest.fixture\n def _arg1():\n \"\"\"_arg1 from conftest\"\"\"\n @pytest.fixture\n def arg2(_arg1):\n \"\"\"arg2 from conftest\"\"\"\n '''\n )\n p = testdir.makepyfile(\n '''\n import pytest\n @pytest.fixture\n def arg3():\n \"\"\"arg3 from testmodule\"\"\"\n def test_args(arg2, arg3):\n pass\n '''\n )\n result = testdir.runpytest(\"--fixtures-per-test\", \"-v\", p)\n assert result.ret == 0\n\n result.stdout.fnmatch_lines(\n [\n \"*fixtures used by test_args*\",\n \"*(test_verbose_include_private_fixtures_and_loc.py:6)*\",\n \"_arg1 -- conftest.py:3\",\n \" _arg1 from conftest\",\n \"arg2 -- conftest.py:6\",\n \" arg2 from conftest\",\n \"arg3 -- test_verbose_include_private_fixtures_and_loc.py:3\",\n \" arg3 from testmodule\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_doctest_items_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/python/show_fixtures_per_test.py_test_doctest_items_", "embedding": null, "metadata": {"file_path": "testing/python/show_fixtures_per_test.py", "file_name": "show_fixtures_per_test.py", "file_type": "text/x-python", "category": "test", "start_line": 162, "end_line": 184, "span_ids": ["test_doctest_items"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_doctest_items(testdir):\n testdir.makepyfile(\n '''\n def foo():\n \"\"\"\n >>> 1 + 1\n 2\n \"\"\"\n '''\n )\n testdir.maketxtfile(\n \"\"\"\n >>> 1 + 1\n 2\n \"\"\"\n )\n result = testdir.runpytest(\n \"--fixtures-per-test\", \"--doctest-modules\", \"--doctest-glob=*.txt\", \"-v\"\n )\n assert result.ret == 0\n\n result.stdout.fnmatch_lines([\"*collected 2 items*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_from___future___import_ab_equal_with_bash.return.retval": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_from___future___import_ab_equal_with_bash.return.retval", "embedding": null, "metadata": {"file_path": "testing/test_argcomplete.py", "file_name": "test_argcomplete.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 22, "span_ids": ["imports", "equal_with_bash"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport subprocess\nimport sys\n\nimport pytest\n\n# test for _argcomplete but not specific for any application\n\n\ndef equal_with_bash(prefix, ffc, fc, out=None):\n res = ffc(prefix)\n res_bash = set(fc(prefix))\n retval = set(res) == res_bash\n if out:\n out.write(\"equal_with_bash({}) {} {}\\n\".format(prefix, retval, res))\n if not retval:\n out.write(\" python - bash: %s\\n\" % (set(res) - res_bash))\n out.write(\" bash - python: %s\\n\" % (res_bash - set(res)))\n return retval", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py__copied_from_argcomplete__wrapcall.try_.except_subprocess_CalledP.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py__copied_from_argcomplete__wrapcall.try_.except_subprocess_CalledP.return._", "embedding": null, "metadata": {"file_path": "testing/test_argcomplete.py", "file_name": "test_argcomplete.py", "file_type": "text/x-python", "category": "test", "start_line": 25, "end_line": 46, "span_ids": ["_wrapcall", "equal_with_bash"], "tokens": 198}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# copied from argcomplete.completers as import from there\n# also pulls in argcomplete.__init__ which opens filedescriptor 9\n# this gives an IOError at the end of testrun\n\n\ndef _wrapcall(*args, **kargs):\n try:\n if sys.version_info > (2, 7):\n return subprocess.check_output(*args, **kargs).decode().splitlines()\n if \"stdout\" in kargs:\n raise ValueError(\"stdout argument not allowed, it will be overridden.\")\n process = 
subprocess.Popen(stdout=subprocess.PIPE, *args, **kargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n if retcode:\n cmd = kargs.get(\"args\")\n if cmd is None:\n cmd = args[0]\n raise subprocess.CalledProcessError(retcode, cmd)\n return output.decode().splitlines()\n except subprocess.CalledProcessError:\n return []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_FilesCompleter_FilesCompleter.__call__.return.completion": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_FilesCompleter_FilesCompleter.__call__.return.completion", "embedding": null, "metadata": {"file_path": "testing/test_argcomplete.py", "file_name": "test_argcomplete.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 89, "span_ids": ["FilesCompleter", "FilesCompleter.__call__"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class FilesCompleter(object):\n \"File completer class, optionally takes a list of allowed extensions\"\n\n def __init__(self, allowednames=(), directories=True):\n # Fix if someone passes in a string instead of a list\n if type(allowednames) is str:\n allowednames = [allowednames]\n\n self.allowednames = [x.lstrip(\"*\").lstrip(\".\") for x in allowednames]\n self.directories = directories\n\n def __call__(self, prefix, **kwargs):\n completion = []\n if self.allowednames:\n if self.directories:\n files = _wrapcall(\n [\"bash\", \"-c\", \"compgen -A directory -- '{p}'\".format(p=prefix)]\n )\n completion += [f + \"/\" for f in files]\n for x in self.allowednames:\n completion += _wrapcall(\n [\n \"bash\",\n \"-c\",\n \"compgen -A file -X '!*.{0}' -- '{p}'\".format(x, p=prefix),\n ]\n )\n else:\n completion += _wrapcall(\n [\"bash\", \"-c\", \"compgen -A file -- '{p}'\".format(p=prefix)]\n )\n\n anticomp = _wrapcall(\n [\"bash\", \"-c\", \"compgen -A directory -- '{p}'\".format(p=prefix)]\n )\n\n completion = list(set(completion) - set(anticomp))\n\n if self.directories:\n completion += [f + \"/\" for f in anticomp]\n return completion", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_TestArgComplete_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_argcomplete.py_TestArgComplete_", "embedding": null, "metadata": {"file_path": "testing/test_argcomplete.py", "file_name": "test_argcomplete.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 119, "span_ids": ["TestArgComplete", "TestArgComplete.test_remove_dir_prefix", "TestArgComplete.test_compare_with_compgen"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestArgComplete(object):\n @pytest.mark.skipif(\"sys.platform in ('win32', 'darwin')\")\n def test_compare_with_compgen(self, tmpdir):\n from _pytest._argcomplete import FastFilesCompleter\n\n ffc = FastFilesCompleter()\n fc = FilesCompleter()\n\n with tmpdir.as_cwd():\n assert equal_with_bash(\"\", ffc, fc, out=sys.stdout)\n\n tmpdir.ensure(\"data\")\n\n for x in [\"d\", \"data\", \"doesnotexist\", \"\"]:\n assert equal_with_bash(x, ffc, fc, out=sys.stdout)\n\n @pytest.mark.skipif(\"sys.platform in ('win32', 'darwin')\")\n def test_remove_dir_prefix(self):\n \"\"\"this is not compatible with compgen but it is with bash itself:\n ls /usr/\n \"\"\"\n from _pytest._argcomplete import FastFilesCompleter\n\n ffc = FastFilesCompleter()\n fc = FilesCompleter()\n for x in \"/usr/\".split():\n assert not equal_with_bash(x, ffc, fc, out=sys.stdout)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py__coding_utf_8__mock_config.return.Config_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py__coding_utf_8__mock_config.return.Config_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["impl", "mock_config.Config", "mock_config", "docstring", "mock_config.Config:2", "imports"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport textwrap\n\nimport attr\nimport six\n\nimport _pytest.assertion as plugin\nimport pytest\nfrom _pytest import outcomes\nfrom _pytest.assertion import truncate\nfrom _pytest.assertion import util\n\nPY3 = sys.version_info >= (3, 0)\n\n\ndef mock_config():\n class Config(object):\n verbose = False\n\n def getoption(self, name):\n if name == \"verbose\":\n return self.verbose\n raise KeyError(\"Not mocked out: %s\" % name)\n\n return Config()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation_TestImportHookInstallation.test_conftest_assertion_rewrite.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation_TestImportHookInstallation.test_conftest_assertion_rewrite.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 64, "span_ids": ["TestImportHookInstallation.test_conftest_assertion_rewrite", "TestImportHookInstallation"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n @pytest.mark.parametrize(\"initial_conftest\", [True, False])\n @pytest.mark.parametrize(\"mode\", [\"plain\", \"rewrite\"])\n def test_conftest_assertion_rewrite(self, testdir, initial_conftest, mode):\n \"\"\"Test that conftest files are using assertion rewrite on import.\n (#1619)\n \"\"\"\n testdir.tmpdir.join(\"foo/tests\").ensure(dir=1)\n conftest_path = \"conftest.py\" if initial_conftest else \"foo/conftest.py\"\n contents = {\n conftest_path: \"\"\"\n import pytest\n @pytest.fixture\n def check_first():\n def check(values, value):\n assert values.pop(0) == value\n return check\n \"\"\",\n \"foo/tests/test_foo.py\": \"\"\"\n def test(check_first):\n check_first([10, 30], 30)\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess(\"--assert=%s\" % mode)\n if mode == \"plain\":\n expected = \"E AssertionError\"\n elif mode == \"rewrite\":\n expected = \"*assert 10 == 30*\"\n else:\n assert 0\n result.stdout.fnmatch_lines([expected])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin_TestImportHookInstallation.test_rewrite_assertions_pytester_plugin.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 81, "span_ids": ["TestImportHookInstallation.test_rewrite_assertions_pytester_plugin"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n\n def test_rewrite_assertions_pytester_plugin(self, testdir):\n \"\"\"\n Assertions in the pytester plugin must also benefit from assertion\n rewriting (#1920).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n pytest_plugins = ['pytester']\n def test_dummy_failure(testdir): # how meta!\n testdir.makepyfile('def test(): assert 0')\n r = testdir.inline_run()\n r.assertoutcome(passed=1)\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*assert 1 == 0*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_TestImportHookInstallation.test_pytest_plugins_rewrite.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_TestImportHookInstallation.test_pytest_plugins_rewrite.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 110, "span_ids": ["TestImportHookInstallation.test_pytest_plugins_rewrite"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n\n @pytest.mark.parametrize(\"mode\", [\"plain\", \"rewrite\"])\n def test_pytest_plugins_rewrite(self, testdir, mode):\n contents = {\n \"conftest.py\": \"\"\"\n pytest_plugins = ['ham']\n \"\"\",\n \"ham.py\": \"\"\"\n import pytest\n @pytest.fixture\n def check_first():\n def check(values, value):\n assert values.pop(0) == value\n return check\n \"\"\",\n \"test_foo.py\": \"\"\"\n def test_foo(check_first):\n check_first([10, 30], 30)\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess(\"--assert=%s\" % mode)\n if mode == \"plain\":\n expected = \"E AssertionError\"\n elif mode == \"rewrite\":\n expected = \"*assert 10 == 30*\"\n else:\n assert 0\n result.stdout.fnmatch_lines([expected])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 135, "span_ids": ["TestImportHookInstallation.test_pytest_plugins_rewrite_module_names"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n\n @pytest.mark.parametrize(\"mode\", [\"str\", \"list\"])\n def test_pytest_plugins_rewrite_module_names(self, testdir, mode):\n \"\"\"Test that pluginmanager correct marks pytest_plugins variables\n for assertion rewriting if they are defined as plain strings or\n list of strings (#1888).\n \"\"\"\n plugins = '\"ham\"' if mode == \"str\" else '[\"ham\"]'\n contents = {\n \"conftest.py\": \"\"\"\n pytest_plugins = {plugins}\n \"\"\".format(\n plugins=plugins\n ),\n \"ham.py\": \"\"\"\n import pytest\n \"\"\",\n \"test_foo.py\": \"\"\"\n def test_foo(pytestconfig):\n assert 'ham' in 
pytestconfig.pluginmanager.rewrite_hook._must_rewrite\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess(\"--assert=rewrite\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly_TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 153, "span_ids": ["TestImportHookInstallation.test_pytest_plugins_rewrite_module_names_correctly"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n\n def test_pytest_plugins_rewrite_module_names_correctly(self, testdir):\n \"\"\"Test that we match files correctly when they are marked for rewriting (#2939).\"\"\"\n contents = {\n \"conftest.py\": \"\"\"\n pytest_plugins = \"ham\"\n \"\"\",\n \"ham.py\": \"\",\n \"hamster.py\": \"\",\n \"test_foo.py\": \"\"\"\n def test_foo(pytestconfig):\n assert pytestconfig.pluginmanager.rewrite_hook.find_module('ham') is not None\n assert pytestconfig.pluginmanager.rewrite_hook.find_module('hamster') is None\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess(\"--assert=rewrite\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_installed_plugin_rewrite_TestImportHookInstallation.test_installed_plugin_rewrite.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_installed_plugin_rewrite_TestImportHookInstallation.test_installed_plugin_rewrite.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 239, "span_ids": ["TestImportHookInstallation.test_installed_plugin_rewrite"], "tokens": 628}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestImportHookInstallation(object):\n\n @pytest.mark.parametrize(\"mode\", [\"plain\", \"rewrite\"])\n 
@pytest.mark.parametrize(\"plugin_state\", [\"development\", \"installed\"])\n def test_installed_plugin_rewrite(self, testdir, mode, plugin_state, monkeypatch):\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n # Make sure the hook is installed early enough so that plugins\n # installed via setuptools are rewritten.\n testdir.tmpdir.join(\"hampkg\").ensure(dir=1)\n contents = {\n \"hampkg/__init__.py\": \"\"\"\n import pytest\n\n @pytest.fixture\n def check_first2():\n def check(values, value):\n assert values.pop(0) == value\n return check\n \"\"\",\n \"spamplugin.py\": \"\"\"\n import pytest\n from hampkg import check_first2\n\n @pytest.fixture\n def check_first():\n def check(values, value):\n assert values.pop(0) == value\n return check\n \"\"\",\n \"mainwrapper.py\": \"\"\"\n import pytest, pkg_resources\n\n plugin_state = \"{plugin_state}\"\n\n class DummyDistInfo(object):\n project_name = 'spam'\n version = '1.0'\n\n def _get_metadata(self, name):\n # 'RECORD' meta-data only available in installed plugins\n if name == 'RECORD' and plugin_state == \"installed\":\n return ['spamplugin.py,sha256=abc,123',\n 'hampkg/__init__.py,sha256=abc,123']\n # 'SOURCES.txt' meta-data only available for plugins in development mode\n elif name == 'SOURCES.txt' and plugin_state == \"development\":\n return ['spamplugin.py',\n 'hampkg/__init__.py']\n return []\n\n class DummyEntryPoint(object):\n name = 'spam'\n module_name = 'spam.py'\n attrs = ()\n extras = None\n dist = DummyDistInfo()\n\n def load(self, require=True, *args, **kwargs):\n import spamplugin\n return spamplugin\n\n def iter_entry_points(group, name=None):\n yield DummyEntryPoint()\n\n pkg_resources.iter_entry_points = iter_entry_points\n pytest.main()\n \"\"\".format(\n plugin_state=plugin_state\n ),\n \"test_foo.py\": \"\"\"\n def test(check_first):\n check_first([10, 30], 30)\n\n def test2(check_first2):\n check_first([10, 30], 30)\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.run(\n sys.executable, \"mainwrapper.py\", \"-s\", \"--assert=%s\" % mode\n )\n if mode == \"plain\":\n expected = \"E AssertionError\"\n elif mode == \"rewrite\":\n expected = \"*assert 10 == 30*\"\n else:\n assert 0\n result.stdout.fnmatch_lines([expected])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_ast_TestImportHookInstallation.test_register_assert_rewrite_checks_types.pytest_register_assert_re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestImportHookInstallation.test_rewrite_ast_TestImportHookInstallation.test_register_assert_rewrite_checks_types.pytest_register_assert_re", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 291, "span_ids": ["TestImportHookInstallation.test_register_assert_rewrite_checks_types", "TestImportHookInstallation.test_rewrite_ast"], "tokens": 339}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "class TestImportHookInstallation(object):\n\n def test_rewrite_ast(self, testdir):\n testdir.tmpdir.join(\"pkg\").ensure(dir=1)\n contents = {\n \"pkg/__init__.py\": \"\"\"\n import pytest\n pytest.register_assert_rewrite('pkg.helper')\n \"\"\",\n \"pkg/helper.py\": \"\"\"\n def tool():\n a, b = 2, 3\n assert a == b\n \"\"\",\n \"pkg/plugin.py\": \"\"\"\n import pytest, pkg.helper\n @pytest.fixture\n def tool():\n return pkg.helper.tool\n \"\"\",\n \"pkg/other.py\": \"\"\"\n values = [3, 2]\n def tool():\n assert values.pop() == 3\n \"\"\",\n \"conftest.py\": \"\"\"\n pytest_plugins = ['pkg.plugin']\n \"\"\",\n \"test_pkg.py\": \"\"\"\n import pkg.other\n def test_tool(tool):\n tool()\n def test_other():\n pkg.other.tool()\n \"\"\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess(\"--assert=rewrite\")\n result.stdout.fnmatch_lines(\n [\n \">*assert a == b*\",\n \"E*assert 2 == 3*\",\n \">*assert values.pop() == 3*\",\n \"E*AssertionError\",\n ]\n )\n\n def test_register_assert_rewrite_checks_types(self):\n with pytest.raises(TypeError):\n pytest.register_assert_rewrite([\"pytest_tests_internal_non_existing\"])\n pytest.register_assert_rewrite(\n \"pytest_tests_internal_non_existing\", \"pytest_tests_internal_non_existing2\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestBinReprIntegration_callequal.return.plugin_pytest_assertrepr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestBinReprIntegration_callequal.return.plugin_pytest_assertrepr_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 323, "span_ids": ["TestBinReprIntegration", "callequal", "TestBinReprIntegration.test_pytest_assertrepr_compare_called"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestBinReprIntegration(object):\n def test_pytest_assertrepr_compare_called(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n values = []\n def pytest_assertrepr_compare(op, left, right):\n values.append((op, left, right))\n\n @pytest.fixture\n def list(request):\n return values\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n assert 0 == 1\n def test_check(list):\n assert list == [(\"==\", 0, 1)]\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines([\"*test_hello*FAIL*\", \"*test_check*PASS*\"])\n\n\ndef callequal(left, right, verbose=False):\n config = mock_config()\n config.verbose = verbose\n return plugin.pytest_assertrepr_compare(config, \"==\", left, right)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_TestAssert_reprcompare.test_list.assert_len_expl_1": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_TestAssert_reprcompare.test_list.assert_len_expl_1", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 326, "end_line": 359, "span_ids": ["TestAssert_reprcompare.test_summary", "TestAssert_reprcompare", "TestAssert_reprcompare.test_multiline_text_diff", "TestAssert_reprcompare.test_text_diff", "TestAssert_reprcompare.test_different_types", "TestAssert_reprcompare.test_text_skipping", "TestAssert_reprcompare.test_text_skipping_verbose", "TestAssert_reprcompare.test_list"], "tokens": 325}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n def test_different_types(self):\n assert callequal([0, 1], \"foo\") is None\n\n def test_summary(self):\n summary = callequal([0, 1], [0, 2])[0]\n assert len(summary) < 65\n\n def test_text_diff(self):\n diff = callequal(\"spam\", \"eggs\")[1:]\n assert \"- spam\" in diff\n assert \"+ eggs\" in diff\n\n def test_text_skipping(self):\n lines = callequal(\"a\" * 50 + \"spam\", \"a\" * 50 + \"eggs\")\n assert \"Skipping\" in lines[1]\n for line in lines:\n assert \"a\" * 50 not in line\n\n def test_text_skipping_verbose(self):\n lines = callequal(\"a\" * 50 + \"spam\", \"a\" * 50 + \"eggs\", verbose=True)\n assert \"- \" + \"a\" * 50 + \"spam\" in lines\n assert \"+ \" + \"a\" * 50 + \"eggs\" in lines\n\n def test_multiline_text_diff(self):\n left = \"foo\\nspam\\nbar\"\n right = \"foo\\neggs\\nbar\"\n diff = callequal(left, right)\n assert \"- spam\" in diff\n assert \"+ eggs\" in diff\n\n def test_list(self):\n expl = callequal([0, 1], [0, 2])\n assert len(expl) > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_iterable_full_diff_TestAssert_reprcompare.test_iterable_full_diff.assert_expl_endswith_text": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_iterable_full_diff_TestAssert_reprcompare.test_iterable_full_diff.assert_expl_endswith_text", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 361, "end_line": 416, "span_ids": ["TestAssert_reprcompare.test_iterable_full_diff"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n @pytest.mark.parametrize(\n [\"left\", \"right\", \"expected\"],\n [\n (\n [0, 1],\n [0, 2],\n \"\"\"\n Full diff:\n - [0, 1]\n ? ^\n + [0, 2]\n ? ^\n \"\"\",\n ),\n (\n {0: 1},\n {0: 2},\n \"\"\"\n Full diff:\n - {0: 1}\n ? ^\n + {0: 2}\n ? 
^\n \"\"\",\n ),\n (\n {0, 1},\n {0, 2},\n \"\"\"\n Full diff:\n - set([0, 1])\n ? ^\n + set([0, 2])\n ? ^\n \"\"\"\n if not PY3\n else \"\"\"\n Full diff:\n - {0, 1}\n ? ^\n + {0, 2}\n ? ^\n \"\"\",\n ),\n ],\n )\n def test_iterable_full_diff(self, left, right, expected):\n \"\"\"Test the full diff assertion failure explanation.\n\n When verbose is False, then just a -v notice to get the diff is rendered,\n when verbose is True, then ndiff of the pprint is returned.\n \"\"\"\n expl = callequal(left, right, verbose=False)\n assert expl[-1] == \"Use -v to get the full diff\"\n expl = \"\\n\".join(callequal(left, right, verbose=True))\n assert expl.endswith(textwrap.dedent(expected).strip())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_different_lengths_TestAssert_reprcompare.test_dict_omitting.for_line_in_lines_1_.assert_b_not_in_line": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_different_lengths_TestAssert_reprcompare.test_dict_omitting.for_line_in_lines_1_.assert_b_not_in_line", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 418, "end_line": 433, "span_ids": ["TestAssert_reprcompare.test_dict", "TestAssert_reprcompare.test_dict_omitting", "TestAssert_reprcompare.test_list_different_lengths"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_list_different_lengths(self):\n expl = callequal([0, 1], [0, 1, 2])\n assert len(expl) > 1\n expl = callequal([0, 1, 2], [0, 1])\n assert len(expl) > 1\n\n def test_dict(self):\n expl = callequal({\"a\": 0}, {\"a\": 1})\n assert len(expl) > 1\n\n def test_dict_omitting(self):\n lines = callequal({\"a\": 0, \"b\": 1}, {\"a\": 1, \"b\": 1})\n assert lines[1].startswith(\"Omitting 1 identical item\")\n assert \"Common items\" not in lines\n for line in lines[1:]:\n assert \"b\" not in line", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_omitting_with_verbosity_1_TestAssert_reprcompare.test_dict_omitting_with_verbosity_2.assert_lines_2_b_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_omitting_with_verbosity_1_TestAssert_reprcompare.test_dict_omitting_with_verbosity_2.assert_lines_2_b_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 435, "end_line": 447, "span_ids": ["TestAssert_reprcompare.test_dict_omitting_with_verbosity_1", "TestAssert_reprcompare.test_dict_omitting_with_verbosity_2"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_dict_omitting_with_verbosity_1(self):\n \"\"\" Ensure differing items are visible for verbosity=1 (#1512) \"\"\"\n lines = callequal({\"a\": 0, \"b\": 1}, {\"a\": 1, \"b\": 1}, verbose=1)\n assert lines[1].startswith(\"Omitting 1 identical item\")\n assert lines[2].startswith(\"Differing items\")\n assert lines[3] == \"{'a': 0} != {'a': 1}\"\n assert \"Common items\" not in lines\n\n def test_dict_omitting_with_verbosity_2(self):\n lines = callequal({\"a\": 0, \"b\": 1}, {\"a\": 1, \"b\": 1}, verbose=2)\n assert lines[1].startswith(\"Common items:\")\n assert \"Omitting\" not in lines[1]\n assert lines[2] == \"{'b': 1}\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_different_items_TestAssert_reprcompare.test_dict_different_items.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_dict_different_items_TestAssert_reprcompare.test_dict_different_items.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 449, "end_line": 471, "span_ids": ["TestAssert_reprcompare.test_dict_different_items"], "tokens": 259}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_dict_different_items(self):\n lines = callequal({\"a\": 0}, {\"b\": 1, \"c\": 2}, verbose=2)\n assert lines == [\n \"{'a': 0} == {'b': 1, 'c': 2}\",\n \"Left contains 1 more item:\",\n \"{'a': 0}\",\n \"Right contains 2 more items:\",\n \"{'b': 1, 'c': 2}\",\n \"Full diff:\",\n \"- {'a': 0}\",\n \"+ {'b': 1, 'c': 2}\",\n ]\n lines = callequal({\"b\": 1, \"c\": 2}, {\"a\": 0}, verbose=2)\n assert lines == [\n \"{'b': 1, 'c': 2} == {'a': 0}\",\n \"Left contains 2 more items:\",\n \"{'b': 1, 'c': 2}\",\n \"Right contains 1 more item:\",\n \"{'a': 0}\",\n \"Full diff:\",\n \"- {'b': 1, 'c': 2}\",\n \"+ {'a': 0}\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_sequence_different_items_TestAssert_reprcompare.test_sequence_different_items.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_sequence_different_items_TestAssert_reprcompare.test_sequence_different_items.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 473, "end_line": 
491, "span_ids": ["TestAssert_reprcompare.test_sequence_different_items"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_sequence_different_items(self):\n lines = callequal((1, 2), (3, 4, 5), verbose=2)\n assert lines == [\n \"(1, 2) == (3, 4, 5)\",\n \"At index 0 diff: 1 != 3\",\n \"Right contains one more item: 5\",\n \"Full diff:\",\n \"- (1, 2)\",\n \"+ (3, 4, 5)\",\n ]\n lines = callequal((1, 2, 3), (4,), verbose=2)\n assert lines == [\n \"(1, 2, 3) == (4,)\",\n \"At index 0 diff: 1 != 4\",\n \"Left contains 2 more items, first extra item: 2\",\n \"Full diff:\",\n \"- (1, 2, 3)\",\n \"+ (4,)\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_set_TestAssert_reprcompare.test_Sequence.assert_len_expl_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_set_TestAssert_reprcompare.test_Sequence.assert_len_expl_1", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 493, "end_line": 530, "span_ids": ["TestAssert_reprcompare.test_set", "TestAssert_reprcompare.test_Sequence.TestSequence:2", "TestAssert_reprcompare.test_Sequence", "TestAssert_reprcompare.test_frozenzet", "TestAssert_reprcompare.test_Sequence.TestSequence"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_set(self):\n expl = callequal({0, 1}, {0, 2})\n assert len(expl) > 1\n\n def test_frozenzet(self):\n expl = callequal(frozenset([0, 1]), {0, 2})\n assert len(expl) > 1\n\n def test_Sequence(self):\n if sys.version_info >= (3, 3):\n import collections.abc as collections_abc\n else:\n import collections as collections_abc\n if not hasattr(collections_abc, \"MutableSequence\"):\n pytest.skip(\"cannot import MutableSequence\")\n MutableSequence = collections_abc.MutableSequence\n\n class TestSequence(MutableSequence): # works with a Sequence subclass\n def __init__(self, iterable):\n self.elements = list(iterable)\n\n def __getitem__(self, item):\n return self.elements[item]\n\n def __len__(self):\n return len(self.elements)\n\n def __setitem__(self, item, value):\n pass\n\n def __delitem__(self, item):\n pass\n\n def insert(self, item, index):\n pass\n\n expl = callequal(TestSequence([0, 1]), list([0, 2]))\n assert len(expl) > 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_tuples_TestAssert_reprcompare.test_repr_verbose.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_tuples_TestAssert_reprcompare.test_repr_verbose.None_5", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 532, "end_line": 560, "span_ids": ["TestAssert_reprcompare.test_list_tuples", "TestAssert_reprcompare.test_repr_verbose", "TestAssert_reprcompare.test_repr_verbose.Nums", "TestAssert_reprcompare.test_repr_verbose.Nums.__init__"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare(object):\n\n def test_list_tuples(self):\n expl = callequal([], [(1, 2)])\n assert len(expl) > 1\n expl = callequal([(1, 2)], [])\n assert len(expl) > 1\n\n def test_repr_verbose(self):\n class Nums:\n def __init__(self, nums):\n self.nums = nums\n\n def __repr__(self):\n return str(self.nums)\n\n list_x = list(range(5000))\n list_y = list(range(5000))\n list_y[len(list_y) // 2] = 3\n nums_x = Nums(list_x)\n nums_y = Nums(list_y)\n\n assert callequal(nums_x, nums_y) is None\n\n expl = callequal(nums_x, nums_y, verbose=1)\n assert \"-\" + repr(nums_x) in expl\n assert \"+\" + repr(nums_y) in expl\n\n expl = callequal(nums_x, nums_y, verbose=2)\n assert \"-\" + repr(nums_x) in expl\n assert \"+\" + repr(nums_y) in expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_bad_repr_TestAssert_reprcompare.test_mojibake.assert_msg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare.test_list_bad_repr_TestAssert_reprcompare.test_mojibake.assert_msg", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 621, "span_ids": ["TestAssert_reprcompare.test_unicode", "TestAssert_reprcompare.test_one_repr_empty.A", "TestAssert_reprcompare.test_nonascii_text.A.__repr__", "TestAssert_reprcompare.test_nonascii_text.A", "TestAssert_reprcompare.test_one_repr_empty.A.__repr__", "TestAssert_reprcompare.test_list_bad_repr.A", "TestAssert_reprcompare.test_nonascii_text", "TestAssert_reprcompare.test_one_repr_empty", "TestAssert_reprcompare.test_list_bad_repr", "TestAssert_reprcompare.test_format_nonascii_explanation", "TestAssert_reprcompare.test_repr_no_exc", "TestAssert_reprcompare.test_mojibake", "TestAssert_reprcompare.test_list_bad_repr.A.__repr__"], "tokens": 391}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
TestAssert_reprcompare(object):\n\n def test_list_bad_repr(self):\n class A(object):\n def __repr__(self):\n raise ValueError(42)\n\n expl = callequal([], [A()])\n assert \"ValueError\" in \"\".join(expl)\n expl = callequal({}, {\"1\": A()})\n assert \"faulty\" in \"\".join(expl)\n\n def test_one_repr_empty(self):\n \"\"\"\n the faulty empty string repr did trigger\n an unbound local error in _diff_text\n \"\"\"\n\n class A(str):\n def __repr__(self):\n return \"\"\n\n expl = callequal(A(), \"\")\n assert not expl\n\n def test_repr_no_exc(self):\n expl = \" \".join(callequal(\"foo\", \"bar\"))\n assert \"raised in repr()\" not in expl\n\n def test_unicode(self):\n left = u\"\u00a3\u20ac\"\n right = u\"\u00a3\"\n expl = callequal(left, right)\n assert expl[0] == u\"'\u00a3\u20ac' == '\u00a3'\"\n assert expl[1] == u\"- \u00a3\u20ac\"\n assert expl[2] == u\"+ \u00a3\"\n\n def test_nonascii_text(self):\n \"\"\"\n :issue: 877\n non ascii python2 str caused a UnicodeDecodeError\n \"\"\"\n\n class A(str):\n def __repr__(self):\n return \"\\xff\"\n\n expl = callequal(A(), \"1\")\n assert expl\n\n def test_format_nonascii_explanation(self):\n assert util.format_explanation(\"\u03bb\")\n\n def test_mojibake(self):\n # issue 429\n left = b\"e\"\n right = b\"\\xc3\\xa9\"\n expl = callequal(left, right)\n for line in expl:\n assert isinstance(line, six.text_type)\n msg = u\"\\n\".join(expl)\n assert msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass_TestAssert_reprcompare_dataclass.test_dataclasses.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass_TestAssert_reprcompare_dataclass.test_dataclasses.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 627, "end_line": 639, "span_ids": ["TestAssert_reprcompare_dataclass.test_dataclasses", "TestAssert_reprcompare_dataclass"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare_dataclass(object):\n @pytest.mark.skipif(sys.version_info < (3, 7), reason=\"Dataclasses in Python3.7+\")\n def test_dataclasses(self, testdir):\n p = testdir.copy_example(\"dataclasses/test_compare_dataclasses.py\")\n result = testdir.runpytest(p)\n result.assert_outcomes(failed=1, passed=0)\n result.stdout.fnmatch_lines(\n [\n \"*Omitting 1 identical items, use -vv to show*\",\n \"*Differing attributes:*\",\n \"*field_b: 'b' != 'c'*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_verbose_TestAssert_reprcompare_dataclass.test_dataclasses_verbose.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_verbose_TestAssert_reprcompare_dataclass.test_dataclasses_verbose.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 641, "end_line": 653, "span_ids": ["TestAssert_reprcompare_dataclass.test_dataclasses_verbose"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare_dataclass(object):\n\n @pytest.mark.skipif(sys.version_info < (3, 7), reason=\"Dataclasses in Python3.7+\")\n def test_dataclasses_verbose(self, testdir):\n p = testdir.copy_example(\"dataclasses/test_compare_dataclasses_verbose.py\")\n result = testdir.runpytest(p, \"-vv\")\n result.assert_outcomes(failed=1, passed=0)\n result.stdout.fnmatch_lines(\n [\n \"*Matching attributes:*\",\n \"*['field_a']*\",\n \"*Differing attributes:*\",\n \"*field_b: 'b' != 'c'*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_with_attribute_comparison_off_TestAssert_reprcompare_dataclass.test_comparing_two_different_data_classes.result_assert_outcomes_fa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_dataclass.test_dataclasses_with_attribute_comparison_off_TestAssert_reprcompare_dataclass.test_comparing_two_different_data_classes.result_assert_outcomes_fa", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 655, "end_line": 669, "span_ids": ["TestAssert_reprcompare_dataclass.test_dataclasses_with_attribute_comparison_off", "TestAssert_reprcompare_dataclass.test_comparing_two_different_data_classes"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare_dataclass(object):\n\n @pytest.mark.skipif(sys.version_info < (3, 7), reason=\"Dataclasses in Python3.7+\")\n def test_dataclasses_with_attribute_comparison_off(self, testdir):\n p = testdir.copy_example(\n \"dataclasses/test_compare_dataclasses_field_comparison_off.py\"\n )\n result = testdir.runpytest(p, \"-vv\")\n result.assert_outcomes(failed=0, passed=1)\n\n @pytest.mark.skipif(sys.version_info < (3, 7), reason=\"Dataclasses in Python3.7+\")\n def test_comparing_two_different_data_classes(self, testdir):\n p = testdir.copy_example(\n \"dataclasses/test_compare_two_different_dataclasses.py\"\n )\n result = testdir.runpytest(p, \"-vv\")\n result.assert_outcomes(failed=0, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass_TestAssert_reprcompare_attrsclass.test_attrs_verbose.assert_lines_2_fie": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass_TestAssert_reprcompare_attrsclass.test_attrs_verbose.assert_lines_2_fie", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 672, "end_line": 700, "span_ids": ["TestAssert_reprcompare_attrsclass.test_attrs.SimpleDataObject:2", "TestAssert_reprcompare_attrsclass.test_attrs", "TestAssert_reprcompare_attrsclass.test_attrs_verbose", "TestAssert_reprcompare_attrsclass.test_attrs.SimpleDataObject", "TestAssert_reprcompare_attrsclass", "TestAssert_reprcompare_attrsclass.test_attrs_verbose.SimpleDataObject", "TestAssert_reprcompare_attrsclass.test_attrs_verbose.SimpleDataObject:2"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare_attrsclass(object):\n def test_attrs(self):\n @attr.s\n class SimpleDataObject(object):\n field_a = attr.ib()\n field_b = attr.ib()\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"c\")\n\n lines = callequal(left, right)\n assert lines[1].startswith(\"Omitting 1 identical item\")\n assert \"Matching attributes\" not in lines\n for line in lines[1:]:\n assert \"field_a\" not in line\n\n def test_attrs_verbose(self):\n @attr.s\n class SimpleDataObject(object):\n field_a = attr.ib()\n field_b = attr.ib()\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"c\")\n\n lines = callequal(left, right, verbose=2)\n assert lines[1].startswith(\"Matching attributes:\")\n assert \"Omitting\" not in lines[1]\n assert lines[2] == \"['field_a']\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off_TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.assert_lines_is_None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off_TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.assert_lines_is_None", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 702, "end_line": 733, "span_ids": ["TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.SimpleDataObjectOne:2", "TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off", "TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.SimpleDataObjectTwo:2", "TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.SimpleDataObjectOne", 
"TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off.SimpleDataObject", "TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes.SimpleDataObjectTwo", "TestAssert_reprcompare_attrsclass.test_attrs_with_attribute_comparison_off.SimpleDataObject:2", "TestAssert_reprcompare_attrsclass.test_comparing_two_different_attrs_classes"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssert_reprcompare_attrsclass(object):\n\n def test_attrs_with_attribute_comparison_off(self):\n @attr.s\n class SimpleDataObject(object):\n field_a = attr.ib()\n field_b = attr.ib(cmp=False)\n\n left = SimpleDataObject(1, \"b\")\n right = SimpleDataObject(1, \"b\")\n\n lines = callequal(left, right, verbose=2)\n assert lines[1].startswith(\"Matching attributes:\")\n assert \"Omitting\" not in lines[1]\n assert lines[2] == \"['field_a']\"\n for line in lines[2:]:\n assert \"field_b\" not in line\n\n def test_comparing_two_different_attrs_classes(self):\n @attr.s\n class SimpleDataObjectOne(object):\n field_a = attr.ib()\n field_b = attr.ib()\n\n @attr.s\n class SimpleDataObjectTwo(object):\n field_a = attr.ib()\n field_b = attr.ib()\n\n left = SimpleDataObjectOne(1, \"b\")\n right = SimpleDataObjectTwo(1, \"c\")\n\n lines = callequal(left, right)\n assert lines is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation_TestFormatExplanation.test_fmt_newline_escaped.assert_util_format_explan": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation_TestFormatExplanation.test_fmt_newline_escaped.assert_util_format_explan", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 736, "end_line": 776, "span_ids": ["TestFormatExplanation.test_fmt_newline", "TestFormatExplanation.test_fmt_where_nested", "TestFormatExplanation.test_fmt_where", "TestFormatExplanation", "TestFormatExplanation.test_fmt_newline_escaped", "TestFormatExplanation.test_fmt_simple", "TestFormatExplanation.test_special_chars_full", "TestFormatExplanation.test_fmt_and"], "tokens": 460}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormatExplanation(object):\n def test_special_chars_full(self, testdir):\n # Issue 453, for the bug this would raise IndexError\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert '\\\\n}' == ''\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*AssertionError*\"])\n\n def test_fmt_simple(self):\n expl = \"assert foo\"\n assert util.format_explanation(expl) == \"assert foo\"\n\n def test_fmt_where(self):\n expl = \"\\n\".join([\"assert 1\", \"{1 = 
foo\", \"} == 2\"])\n res = \"\\n\".join([\"assert 1 == 2\", \" + where 1 = foo\"])\n assert util.format_explanation(expl) == res\n\n def test_fmt_and(self):\n expl = \"\\n\".join([\"assert 1\", \"{1 = foo\", \"} == 2\", \"{2 = bar\", \"}\"])\n res = \"\\n\".join([\"assert 1 == 2\", \" + where 1 = foo\", \" + and 2 = bar\"])\n assert util.format_explanation(expl) == res\n\n def test_fmt_where_nested(self):\n expl = \"\\n\".join([\"assert 1\", \"{1 = foo\", \"{foo = bar\", \"}\", \"} == 2\"])\n res = \"\\n\".join([\"assert 1 == 2\", \" + where 1 = foo\", \" + where foo = bar\"])\n assert util.format_explanation(expl) == res\n\n def test_fmt_newline(self):\n expl = \"\\n\".join(['assert \"foo\" == \"bar\"', \"~- foo\", \"~+ bar\"])\n res = \"\\n\".join(['assert \"foo\" == \"bar\"', \" - foo\", \" + bar\"])\n assert util.format_explanation(expl) == res\n\n def test_fmt_newline_escaped(self):\n expl = \"\\n\".join([\"assert foo == bar\", \"baz\"])\n res = \"assert foo == bar\\\\nbaz\"\n assert util.format_explanation(expl) == res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_newline_before_where_TestFormatExplanation.test_fmt_newline_before_where.assert_util_format_explan": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_newline_before_where_TestFormatExplanation.test_fmt_newline_before_where.assert_util_format_explan", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 778, "end_line": 797, "span_ids": ["TestFormatExplanation.test_fmt_newline_before_where"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormatExplanation(object):\n\n def test_fmt_newline_before_where(self):\n expl = \"\\n\".join(\n [\n \"the assertion message here\",\n \">assert 1\",\n \"{1 = foo\",\n \"} == 2\",\n \"{2 = bar\",\n \"}\",\n ]\n )\n res = \"\\n\".join(\n [\n \"the assertion message here\",\n \"assert 1 == 2\",\n \" + where 1 = foo\",\n \" + and 2 = bar\",\n ]\n )\n assert util.format_explanation(expl) == res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_multi_newline_before_where_TestFormatExplanation.test_fmt_multi_newline_before_where.assert_util_format_explan": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestFormatExplanation.test_fmt_multi_newline_before_where_TestFormatExplanation.test_fmt_multi_newline_before_where.assert_util_format_explan", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 799, "end_line": 820, "span_ids": ["TestFormatExplanation.test_fmt_multi_newline_before_where"], 
"tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFormatExplanation(object):\n\n def test_fmt_multi_newline_before_where(self):\n expl = \"\\n\".join(\n [\n \"the assertion\",\n \"~message here\",\n \">assert 1\",\n \"{1 = foo\",\n \"} == 2\",\n \"{2 = bar\",\n \"}\",\n ]\n )\n res = \"\\n\".join(\n [\n \"the assertion\",\n \" message here\",\n \"assert 1 == 2\",\n \" + where 1 = foo\",\n \" + and 2 = bar\",\n ]\n )\n assert util.format_explanation(expl) == res", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation_TestTruncateExplanation.test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars.assert_result_expl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation_TestTruncateExplanation.test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars.assert_result_expl", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 823, "end_line": 839, "span_ids": ["TestTruncateExplanation.test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars", "TestTruncateExplanation", "TestTruncateExplanation.test_doesnt_truncate_when_input_is_empty_list"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n \"\"\" Confirm assertion output is truncated as expected \"\"\"\n\n # The number of lines in the truncation explanation message. 
Used\n # to calculate that results have the expected length.\n LINES_IN_TRUNCATION_MSG = 2\n\n def test_doesnt_truncate_when_input_is_empty_list(self):\n expl = []\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)\n assert result == expl\n\n def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self):\n expl = [\"a\" * 100 for x in range(5)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)\n assert result == expl", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings.assert_last_line_before_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings_TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings.assert_last_line_before_t", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 841, "end_line": 849, "span_ids": ["TestTruncateExplanation.test_truncates_at_8_lines_when_given_list_of_empty_strings"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_truncates_at_8_lines_when_given_list_of_empty_strings(self):\n expl = [\"\" for x in range(50)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)\n assert result != expl\n assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG\n assert \"Full output truncated\" in result[-1]\n assert \"43 lines hidden\" in result[-1]\n last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]\n assert last_line_before_trunc_msg.endswith(\"...\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars.assert_last_line_before_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars.assert_last_line_before_t", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 851, "end_line": 859, "span_ids": ["TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self):\n expl = [\"a\" for x in range(100)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)\n assert result != expl\n assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG\n assert \"Full output truncated\" in result[-1]\n assert \"93 lines hidden\" in result[-1]\n last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]\n assert last_line_before_trunc_msg.endswith(\"...\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars.assert_last_line_before_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars_TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars.assert_last_line_before_t", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 861, "end_line": 869, "span_ids": ["TestTruncateExplanation.test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self):\n expl = [\"a\" * 80 for x in range(16)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80)\n assert result != expl\n assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG\n assert \"Full output truncated\" in result[-1]\n assert \"9 lines hidden\" in result[-1]\n last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]\n assert last_line_before_trunc_msg.endswith(\"...\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars.assert_last_line_before_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars_TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars.assert_last_line_before_t", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", 
"category": "test", "start_line": 871, "end_line": 879, "span_ids": ["TestTruncateExplanation.test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self):\n expl = [\"a\" * 250 for x in range(10)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999)\n assert result != expl\n assert len(result) == 4 + self.LINES_IN_TRUNCATION_MSG\n assert \"Full output truncated\" in result[-1]\n assert \"7 lines hidden\" in result[-1]\n last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]\n assert last_line_before_trunc_msg.endswith(\"...\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars.assert_last_line_before_t": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars_TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars.assert_last_line_before_t", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 881, "end_line": 889, "span_ids": ["TestTruncateExplanation.test_truncates_at_1_line_when_first_line_is_GT_max_chars"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self):\n expl = [\"a\" * 250 for x in range(1000)]\n result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100)\n assert result != expl\n assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG\n assert \"Full output truncated\" in result[-1]\n assert \"1000 lines hidden\" in result[-1]\n last_line_before_trunc_msg = result[-self.LINES_IN_TRUNCATION_MSG - 1]\n assert last_line_before_trunc_msg.endswith(\"...\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_full_output_truncated_TestTruncateExplanation.test_full_output_truncated.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_TestTruncateExplanation.test_full_output_truncated_TestTruncateExplanation.test_full_output_truncated.None_5", "embedding": null, "metadata": {"file_path": 
"testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 891, "end_line": 926, "span_ids": ["TestTruncateExplanation.test_full_output_truncated"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTruncateExplanation(object):\n\n def test_full_output_truncated(self, monkeypatch, testdir):\n \"\"\" Test against full runpytest() output. \"\"\"\n\n line_count = 7\n line_len = 100\n expected_truncated_lines = 2\n testdir.makepyfile(\n r\"\"\"\n def test_many_lines():\n a = list([str(i)[0] * %d for i in range(%d)])\n b = a[::2]\n a = '\\n'.join(map(str, a))\n b = '\\n'.join(map(str, b))\n assert a == b\n \"\"\"\n % (line_len, line_count)\n )\n monkeypatch.delenv(\"CI\", raising=False)\n\n result = testdir.runpytest()\n # without -vv, truncate the message showing a few diff lines only\n result.stdout.fnmatch_lines(\n [\n \"*- 1*\",\n \"*- 3*\",\n \"*- 5*\",\n \"*truncated (%d lines hidden)*use*-vv*\" % expected_truncated_lines,\n ]\n )\n\n result = testdir.runpytest(\"-vv\")\n result.stdout.fnmatch_lines([\"* 6*\"])\n\n monkeypatch.setenv(\"CI\", \"1\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 6*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_python25_compile_issue257_test_pytest_assertrepr_compare_integration.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_python25_compile_issue257_test_pytest_assertrepr_compare_integration.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 929, "end_line": 988, "span_ids": ["test_reprcompare_notin", "test_rewritten", "test_pytest_assertrepr_compare_integration", "test_python25_compile_issue257", "test_reprcompare_whitespaces"], "tokens": 384}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_python25_compile_issue257(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_rewritten():\n assert 1 == 2\n # some comment\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n \"\"\"\n *E*assert 1 == 2*\n *1 failed*\n \"\"\"\n )\n\n\ndef test_rewritten(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\n \"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n\ndef test_reprcompare_notin():\n config = mock_config()\n detail = plugin.pytest_assertrepr_compare(config, \"not in\", \"foo\", \"aaafoobbb\")[1:]\n assert detail == [\"'foo' is contained here:\", \" aaafoobbb\", \"? 
+++\"]\n\n\ndef test_reprcompare_whitespaces():\n config = mock_config()\n detail = plugin.pytest_assertrepr_compare(config, \"==\", \"\\r\\n\", \"\\n\")\n assert detail == [\n r\"'\\r\\n' == '\\n'\",\n r\"Strings contain only whitespace, escaping them using repr()\",\n r\"- '\\r\\n'\",\n r\"? --\",\n r\"+ '\\n'\",\n ]\n\n\ndef test_pytest_assertrepr_compare_integration(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n x = set(range(100))\n y = x.copy()\n y.remove(50)\n assert x == y\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*def test_hello():*\", \"*assert x == y*\", \"*E*Extra items*left*\", \"*E*50*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_sequence_comparison_uses_repr_test_sequence_comparison_uses_repr.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_sequence_comparison_uses_repr_test_sequence_comparison_uses_repr.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 991, "end_line": 1010, "span_ids": ["test_sequence_comparison_uses_repr"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_sequence_comparison_uses_repr(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n x = set(\"hello x\")\n y = set(\"hello y\")\n assert x == y\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*def test_hello():*\",\n \"*assert x == y*\",\n \"*E*Extra items*left*\",\n \"*E*'x'*\",\n \"*E*Extra items*right*\",\n \"*E*'y'*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertrepr_loaded_per_dir_test_assertrepr_loaded_per_dir.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertrepr_loaded_per_dir_test_assertrepr_loaded_per_dir.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1013, "end_line": 1035, "span_ids": ["test_assertrepr_loaded_per_dir"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assertrepr_loaded_per_dir(testdir):\n testdir.makepyfile(test_base=[\"def test_base(): assert 1 == 2\"])\n a = testdir.mkdir(\"a\")\n a_test = a.join(\"test_a.py\")\n a_test.write(\"def test_a(): assert 1 == 2\")\n a_conftest = a.join(\"conftest.py\")\n 
a_conftest.write('def pytest_assertrepr_compare(): return [\"summary a\"]')\n b = testdir.mkdir(\"b\")\n b_test = b.join(\"test_b.py\")\n b_test.write(\"def test_b(): assert 1 == 2\")\n b_conftest = b.join(\"conftest.py\")\n b_conftest.write('def pytest_assertrepr_compare(): return [\"summary b\"]')\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*def test_base():*\",\n \"*E*assert 1 == 2*\",\n \"*def test_a():*\",\n \"*E*assert summary a*\",\n \"*def test_b():*\",\n \"*E*assert summary b*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertion_options_test_triple_quoted_string_issue113.assert_SyntaxError_not_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assertion_options_test_triple_quoted_string_issue113.assert_SyntaxError_not_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1038, "end_line": 1061, "span_ids": ["test_assertion_options", "test_triple_quoted_string_issue113"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assertion_options(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n x = 3\n assert x == 4\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"3 == 4\" in result.stdout.str()\n result = testdir.runpytest_subprocess(\"--assert=plain\")\n assert \"3 == 4\" not in result.stdout.str()\n\n\ndef test_triple_quoted_string_issue113(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n assert \"\" == '''\n '''\"\"\"\n )\n result = testdir.runpytest(\"--fulltrace\")\n result.stdout.fnmatch_lines([\"*1 failed*\"])\n assert \"SyntaxError\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_traceback_failure_test_traceback_failure.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_traceback_failure_test_traceback_failure.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1064, "end_line": 1116, "span_ids": ["test_traceback_failure"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_traceback_failure(testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def g():\n return 2\n def f(x):\n assert x == g()\n def test_onefails():\n f(3)\n \"\"\"\n )\n result = testdir.runpytest(p1, \"--tb=long\")\n result.stdout.fnmatch_lines(\n [\n 
\"*test_traceback_failure.py F*\",\n \"====* FAILURES *====\",\n \"____*____\",\n \"\",\n \" def test_onefails():\",\n \"> f(3)\",\n \"\",\n \"*test_*.py:6: \",\n \"_ _ _ *\",\n # \"\",\n \" def f(x):\",\n \"> assert x == g()\",\n \"E assert 3 == 2\",\n \"E + where 2 = g()\",\n \"\",\n \"*test_traceback_failure.py:4: AssertionError\",\n ]\n )\n\n result = testdir.runpytest(p1) # \"auto\"\n result.stdout.fnmatch_lines(\n [\n \"*test_traceback_failure.py F*\",\n \"====* FAILURES *====\",\n \"____*____\",\n \"\",\n \" def test_onefails():\",\n \"> f(3)\",\n \"\",\n \"*test_*.py:6: \",\n \"\",\n \" def f(x):\",\n \"> assert x == g()\",\n \"E assert 3 == 2\",\n \"E + where 2 = g()\",\n \"\",\n \"*test_traceback_failure.py:4: AssertionError\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_exception_handling_no_traceback_test_exception_handling_no_traceback.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_exception_handling_no_traceback_test_exception_handling_no_traceback.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1119, "end_line": 1153, "span_ids": ["test_exception_handling_no_traceback"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.version_info[:2] <= (3, 3),\n reason=\"Python 3.4+ shows chained exceptions on multiprocess\",\n)\ndef test_exception_handling_no_traceback(testdir):\n \"\"\"\n Handle chain exceptions in tasks submitted by the multiprocess module (#1984).\n \"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n from multiprocessing import Pool\n\n def process_task(n):\n assert n == 10\n\n def multitask_job():\n tasks = [1]\n with Pool(processes=1) as pool:\n pool.map(process_task, tasks)\n\n def test_multitask_job():\n multitask_job()\n \"\"\"\n )\n result = testdir.runpytest(p1, \"--tb=long\")\n result.stdout.fnmatch_lines(\n [\n \"====* FAILURES *====\",\n \"*multiprocessing.pool.RemoteTraceback:*\",\n \"Traceback (most recent call last):\",\n \"*assert n == 10\",\n \"The above exception was the direct cause of the following exception:\",\n \"> * multitask_job()\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_warn_missing_test_warn_missing.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_warn_missing_test_warn_missing.None_2", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1156, "end_line": 1164, "span_ids": ["test_warn_missing"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n \"'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')\"\n)\ndef test_warn_missing(testdir):\n testdir.makepyfile(\"\")\n result = testdir.run(sys.executable, \"-OO\", \"-m\", \"pytest\", \"-h\")\n result.stderr.fnmatch_lines([\"*WARNING*assert statements are not executed*\"])\n result = testdir.run(sys.executable, \"-OO\", \"-m\", \"pytest\")\n result.stderr.fnmatch_lines([\"*WARNING*assert statements are not executed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_recursion_source_decode_test_AssertionError_message.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_recursion_source_decode_test_AssertionError_message.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1167, "end_line": 1203, "span_ids": ["test_recursion_source_decode", "test_AssertionError_message"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_recursion_source_decode(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_something():\n pass\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n python_files = *.py\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines(\n \"\"\"\n \n \"\"\"\n )\n\n\ndef test_AssertionError_message(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_hello():\n x,y = 1,2\n assert 0, (x,y)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *def test_hello*\n *assert 0, (x,y)*\n *AssertionError: (1, 2)*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_set_with_unsortable_elements_test_set_with_unsortable_elements.assert_n_join_expl_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_set_with_unsortable_elements_test_set_with_unsortable_elements.assert_n_join_expl_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1206, "end_line": 1242, "span_ids": ["test_set_with_unsortable_elements", "test_set_with_unsortable_elements.UnsortableKey.__init__", "test_set_with_unsortable_elements.UnsortableKey"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(PY3, reason=\"This bug does not exist on PY3\")\ndef test_set_with_unsortable_elements():\n # issue #718\n class UnsortableKey(object):\n def __init__(self, name):\n self.name = name\n\n def __lt__(self, other):\n raise RuntimeError()\n\n def __repr__(self):\n return \"repr({})\".format(self.name)\n\n def __eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n left_set = {UnsortableKey(str(i)) for i in range(1, 3)}\n right_set = {UnsortableKey(str(i)) for i in range(2, 4)}\n expl = callequal(left_set, right_set, verbose=True)\n # skip first line because it contains the \"construction\" of the set, which does not have a guaranteed order\n expl = expl[1:]\n dedent = textwrap.dedent(\n \"\"\"\n Extra items in the left set:\n repr(1)\n Extra items in the right set:\n repr(3)\n Full diff (fallback to calling repr on each item):\n - repr(1)\n repr(2)\n + repr(3)\n \"\"\"\n ).strip()\n assert \"\\n\".join(expl) == dedent", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_diff_newline_at_end_test_assert_tuple_warning.assert_msg_not_in_result_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_diff_newline_at_end_test_assert_tuple_warning.assert_msg_not_in_result_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1245, "end_line": 1284, "span_ids": ["test_assert_tuple_warning", "test_diff_newline_at_end"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_diff_newline_at_end(monkeypatch, testdir):\n testdir.makepyfile(\n r\"\"\"\n def test_diff():\n assert 'asdf' == 'asdf\\n'\n \"\"\"\n )\n\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n r\"\"\"\n *assert 'asdf' == 'asdf\\n'\n * - asdf\n * + asdf\n * ? 
+\n \"\"\"\n )\n\n\n@pytest.mark.filterwarnings(\"default\")\ndef test_assert_tuple_warning(testdir):\n msg = \"assertion is always true\"\n testdir.makepyfile(\n \"\"\"\n def test_tuple():\n assert(False, 'you shall not pass')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*test_assert_tuple_warning.py:2:*{}*\".format(msg)])\n\n # tuples with size != 2 should not trigger the warning\n testdir.makepyfile(\n \"\"\"\n def test_tuple():\n assert ()\n \"\"\"\n )\n result = testdir.runpytest()\n assert msg not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assert_indirect_tuple_no_warning_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertion.py_test_assert_indirect_tuple_no_warning_", "embedding": null, "metadata": {"file_path": "testing/test_assertion.py", "file_name": "test_assertion.py", "file_type": "text/x-python", "category": "test", "start_line": 1287, "end_line": 1366, "span_ids": ["test_raise_assertion_error_raisin_repr", "test_raise_unprintable_assertion_error", "test_assert_indirect_tuple_no_warning", "test_issue_1944", "test_exit_from_assertrepr_compare", "test_assert_with_unicode"], "tokens": 462}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assert_indirect_tuple_no_warning(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_tuple():\n tpl = ('foo', 'bar')\n assert tpl\n \"\"\"\n )\n result = testdir.runpytest(\"-rw\")\n output = \"\\n\".join(result.stdout.lines)\n assert \"WR1\" not in output\n\n\ndef test_assert_with_unicode(monkeypatch, testdir):\n testdir.makepyfile(\n u\"\"\"\n # -*- coding: utf-8 -*-\n def test_unicode():\n assert u'\uc720\ub2c8\ucf54\ub4dc' == u'Unicode'\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*AssertionError*\"])\n\n\ndef test_raise_unprintable_assertion_error(testdir):\n testdir.makepyfile(\n r\"\"\"\n def test_raise_assertion_error():\n raise AssertionError('\\xff')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [r\"> raise AssertionError('\\xff')\", \"E AssertionError: *\"]\n )\n\n\ndef test_raise_assertion_error_raisin_repr(testdir):\n testdir.makepyfile(\n u\"\"\"\n class RaisingRepr(object):\n def __repr__(self):\n raise Exception()\n def test_raising_repr():\n raise AssertionError(RaisingRepr())\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"E AssertionError: \"]\n )\n\n\ndef test_issue_1944(testdir):\n testdir.makepyfile(\n \"\"\"\n def f():\n return\n\n assert f() == 10\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 error*\"])\n assert (\n \"AttributeError: 'Module' object has no attribute '_obj'\"\n not in result.stdout.str()\n )\n\n\ndef test_exit_from_assertrepr_compare(monkeypatch):\n def raise_exit(obj):\n outcomes.exit(\"Quitting debugger\")\n\n monkeypatch.setattr(util, \"istext\", raise_exit)\n\n with pytest.raises(outcomes.Exit, match=\"Quitting debugger\"):\n callequal(1, 1)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py__coding_utf_8__rewrite.return.tree": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py__coding_utf_8__rewrite.return.tree", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 44, "span_ids": ["impl", "teardown_module", "setup_module", "docstring", "rewrite", "imports"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport glob\nimport os\nimport py_compile\nimport stat\nimport sys\nimport textwrap\nimport zipfile\n\nimport py\nimport six\n\nimport _pytest._code\nimport pytest\nfrom _pytest.assertion import util\nfrom _pytest.assertion.rewrite import AssertionRewritingHook\nfrom _pytest.assertion.rewrite import PYTEST_TAG\nfrom _pytest.assertion.rewrite import rewrite_asserts\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\n\nast = pytest.importorskip(\"ast\")\nif sys.platform.startswith(\"java\"):\n # XXX should be xfail\n pytest.skip(\"assert rewrite does currently not work on jython\")\n\n\ndef setup_module(mod):\n mod._old_reprcompare = util._reprcompare\n _pytest._code._reprcompare = None\n\n\ndef teardown_module(mod):\n util._reprcompare = mod._old_reprcompare\n del mod._old_reprcompare\n\n\ndef rewrite(src):\n tree = ast.parse(src)\n rewrite_asserts(tree)\n return tree", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_getmsg_getmsg.try_.else_.if_not_must_pass_.pytest_fail_function_did": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_getmsg_getmsg.try_.else_.if_not_must_pass_.pytest_fail_function_did", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 47, "end_line": 68, "span_ids": ["getmsg"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def getmsg(f, extra_ns=None, must_pass=False):\n \"\"\"Rewrite the assertions in f, run it, and get the failure message.\"\"\"\n src = \"\\n\".join(_pytest._code.Code(f).source().lines)\n mod = rewrite(src)\n code = compile(mod, \"\", \"exec\")\n ns = {}\n if extra_ns is not None:\n ns.update(extra_ns)\n six.exec_(code, ns)\n func = ns[f.__name__]\n try:\n func()\n except AssertionError:\n if must_pass:\n pytest.fail(\"shouldn't have raised\")\n s = 
six.text_type(sys.exc_info()[1])\n if not s.startswith(\"assert\"):\n return \"AssertionError: \" + s\n return s\n else:\n if not must_pass:\n pytest.fail(\"function didn't raise at all\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite_TestAssertionRewrite.test_place_initial_imports.None_9": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite_TestAssertionRewrite.test_place_initial_imports.None_9", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 71, "end_line": 112, "span_ids": ["TestAssertionRewrite", "TestAssertionRewrite.test_place_initial_imports"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n def test_place_initial_imports(self):\n s = \"\"\"'Doc string'\\nother = stuff\"\"\"\n m = rewrite(s)\n assert isinstance(m.body[0], ast.Expr)\n for imp in m.body[1:3]:\n assert isinstance(imp, ast.Import)\n assert imp.lineno == 2\n assert imp.col_offset == 0\n assert isinstance(m.body[3], ast.Assign)\n s = \"\"\"from __future__ import division\\nother_stuff\"\"\"\n m = rewrite(s)\n assert isinstance(m.body[0], ast.ImportFrom)\n for imp in m.body[1:3]:\n assert isinstance(imp, ast.Import)\n assert imp.lineno == 2\n assert imp.col_offset == 0\n assert isinstance(m.body[3], ast.Expr)\n s = \"\"\"'doc string'\\nfrom __future__ import division\"\"\"\n m = rewrite(s)\n assert isinstance(m.body[0], ast.Expr)\n assert isinstance(m.body[1], ast.ImportFrom)\n for imp in m.body[2:4]:\n assert isinstance(imp, ast.Import)\n assert imp.lineno == 2\n assert imp.col_offset == 0\n s = \"\"\"'doc string'\\nfrom __future__ import division\\nother\"\"\"\n m = rewrite(s)\n assert isinstance(m.body[0], ast.Expr)\n assert isinstance(m.body[1], ast.ImportFrom)\n for imp in m.body[2:4]:\n assert isinstance(imp, ast.Import)\n assert imp.lineno == 3\n assert imp.col_offset == 0\n assert isinstance(m.body[4], ast.Expr)\n s = \"\"\"from . 
import relative\\nother_stuff\"\"\"\n m = rewrite(s)\n for imp in m.body[:2]:\n assert isinstance(imp, ast.Import)\n assert imp.lineno == 1\n assert imp.col_offset == 0\n assert isinstance(m.body[3], ast.Expr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_TestAssertionRewrite.test_dont_rewrite_plugin.assert_warnings_not_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_TestAssertionRewrite.test_dont_rewrite_plugin.assert_warnings_not_in_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 114, "end_line": 128, "span_ids": ["TestAssertionRewrite.test_dont_rewrite", "TestAssertionRewrite.test_dont_rewrite_plugin"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_dont_rewrite(self):\n s = \"\"\"'PYTEST_DONT_REWRITE'\\nassert 14\"\"\"\n m = rewrite(s)\n assert len(m.body) == 2\n assert m.body[1].msg is None\n\n def test_dont_rewrite_plugin(self, testdir):\n contents = {\n \"conftest.py\": \"pytest_plugins = 'plugin'; import plugin\",\n \"plugin.py\": \"'PYTEST_DONT_REWRITE'\",\n \"test_foo.py\": \"def test_foo(): pass\",\n }\n testdir.makepyfile(**contents)\n result = testdir.runpytest_subprocess()\n assert \"warnings\" not in \"\".join(result.outlines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_name_TestAssertionRewrite.test_name.None_1.else_.assert_msg_assert_cl": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_name_TestAssertionRewrite.test_name.None_1.else_.assert_msg_assert_cl", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 182, "span_ids": ["TestAssertionRewrite.test_name", "TestAssertionRewrite.test_name.X:2", "TestAssertionRewrite.test_name.X"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_name(self, request):\n def f():\n assert False\n\n assert getmsg(f) == \"assert False\"\n\n def f():\n f = False\n assert f\n\n assert getmsg(f) == \"assert False\"\n\n def f():\n assert a_global # noqa\n\n assert getmsg(f, {\"a_global\": False}) == \"assert False\"\n\n def f():\n assert sys == 42\n\n verbose = 
request.config.getoption(\"verbose\")\n msg = getmsg(f, {\"sys\": sys})\n if verbose > 0:\n assert msg == (\n \"assert == 42\\n\"\n \" -\\n\"\n \" +42\"\n )\n else:\n assert msg == \"assert sys == 42\"\n\n def f():\n assert cls == 42 # noqa: F821\n\n class X(object):\n pass\n\n msg = getmsg(f, {\"cls\": X}).splitlines()\n if verbose > 0:\n if six.PY2:\n assert msg == [\n \"assert == 42\",\n \" -\",\n \" +42\",\n ]\n else:\n assert msg == [\n \"assert .X'> == 42\",\n \" -.X'>\",\n \" +42\",\n ]\n else:\n assert msg == [\"assert cls == 42\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails.if_request_config_getopti.else_.assert_message_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails_TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails.if_request_config_getopti.else_.assert_message_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 184, "end_line": 209, "span_ids": ["TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails.Y:2", "TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails", "TestAssertionRewrite.test_dont_rewrite_if_hasattr_fails.Y"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_dont_rewrite_if_hasattr_fails(self, request):\n class Y(object):\n \"\"\" A class whos getattr fails, but not with `AttributeError` \"\"\"\n\n def __getattr__(self, attribute_name):\n raise KeyError()\n\n def __repr__(self):\n return \"Y\"\n\n def __init__(self):\n self.foo = 3\n\n def f():\n assert cls().foo == 2 # noqa\n\n # XXX: looks like the \"where\" should also be there in verbose mode?!\n message = getmsg(f, {\"cls\": Y}).splitlines()\n if request.config.getoption(\"verbose\") > 0:\n assert message == [\"assert 3 == 2\", \" -3\", \" +2\"]\n else:\n assert message == [\n \"assert 3 == 2\",\n \" + where 3 = Y.foo\",\n \" + where Y = cls()\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_already_has_message_TestAssertionRewrite.test_assertion_messages_bytes.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_already_has_message_TestAssertionRewrite.test_assertion_messages_bytes.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 287, "span_ids": ["TestAssertionRewrite.test_assertion_messages_bytes", 
"TestAssertionRewrite.test_assertion_message_tuple", "TestAssertionRewrite.test_assertion_message_multiline", "TestAssertionRewrite.test_assertion_message", "TestAssertionRewrite.test_assert_already_has_message", "TestAssertionRewrite.test_assertion_message_escape", "TestAssertionRewrite.test_assertion_message_expr"], "tokens": 602}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_assert_already_has_message(self):\n def f():\n assert False, \"something bad!\"\n\n assert getmsg(f) == \"AssertionError: something bad!\\nassert False\"\n\n def test_assertion_message(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert 1 == 2, \"The failure message\"\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\"*AssertionError*The failure message*\", \"*assert 1 == 2*\"]\n )\n\n def test_assertion_message_multiline(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert 1 == 2, \"A multiline\\\\nfailure message\"\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\"*AssertionError*A multiline*\", \"*failure message*\", \"*assert 1 == 2*\"]\n )\n\n def test_assertion_message_tuple(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert 1 == 2, (1, 2)\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\"*AssertionError*%s*\" % repr((1, 2)), \"*assert 1 == 2*\"]\n )\n\n def test_assertion_message_expr(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert 1 == 2, 1 + 2\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*AssertionError*3*\", \"*assert 1 == 2*\"])\n\n def test_assertion_message_escape(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert 1 == 2, 'To be escaped: %'\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\"*AssertionError: To be escaped: %\", \"*assert 1 == 2\"]\n )\n\n @pytest.mark.skipif(\n sys.version_info < (3,), reason=\"bytes is a string type in python 2\"\n )\n def test_assertion_messages_bytes(self, testdir):\n testdir.makepyfile(\"def test_bytes_assertion():\\n assert False, b'ohai!'\\n\")\n result = testdir.runpytest()\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*AssertionError: b'ohai!'\", \"*assert False\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_boolop_TestAssertionRewrite.test_boolop.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_boolop_TestAssertionRewrite.test_boolop.None_2", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 289, "end_line": 366, "span_ids": ["TestAssertionRewrite.test_boolop"], "tokens": 403}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_boolop(self):\n def f():\n f = g = False\n assert f and g\n\n assert getmsg(f) == \"assert (False)\"\n\n def f():\n f = True\n g = False\n assert f and g\n\n assert getmsg(f) == \"assert (True and False)\"\n\n def f():\n f = False\n g = True\n assert f and g\n\n assert getmsg(f) == \"assert (False)\"\n\n def f():\n f = g = False\n assert f or g\n\n assert getmsg(f) == \"assert (False or False)\"\n\n def f():\n f = g = False\n assert not f and not g\n\n getmsg(f, must_pass=True)\n\n def x():\n return False\n\n def f():\n assert x() and x()\n\n assert (\n getmsg(f, {\"x\": x})\n == \"\"\"assert (False)\n + where False = x()\"\"\"\n )\n\n def f():\n assert False or x()\n\n assert (\n getmsg(f, {\"x\": x})\n == \"\"\"assert (False or False)\n + where False = x()\"\"\"\n )\n\n def f():\n assert 1 in {} and 2 in {}\n\n assert getmsg(f) == \"assert (1 in {})\"\n\n def f():\n x = 1\n y = 2\n assert x in {1: None} and y in {}\n\n assert getmsg(f) == \"assert (1 in {1: None} and 2 in {})\"\n\n def f():\n f = True\n g = False\n assert f or g\n\n getmsg(f, must_pass=True)\n\n def f():\n f = g = h = lambda: True\n assert f() and g() and h()\n\n getmsg(f, must_pass=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_short_circuit_evaluation_TestAssertionRewrite.test_unary_op.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_short_circuit_evaluation_TestAssertionRewrite.test_unary_op.None_3", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 403, "span_ids": ["TestAssertionRewrite.test_unary_op", "TestAssertionRewrite.test_short_circuit_evaluation"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_short_circuit_evaluation(self):\n def f():\n assert True or explode # noqa\n\n getmsg(f, must_pass=True)\n\n def f():\n x = 1\n assert x == 1 or x == 2\n\n getmsg(f, must_pass=True)\n\n def test_unary_op(self):\n def f():\n x = True\n assert not x\n\n assert getmsg(f) == \"assert not True\"\n\n def f():\n x = 0\n assert ~x + 1\n\n assert getmsg(f) == \"assert (~0 + 1)\"\n\n def f():\n x = 3\n assert -x + x\n\n assert getmsg(f) == \"assert (-3 + 3)\"\n\n def f():\n x = 0\n assert +x + x\n\n assert getmsg(f) == \"assert (+0 + 0)\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_binary_op_TestAssertionRewrite.test_boolop_percent.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_binary_op_TestAssertionRewrite.test_boolop_percent.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 405, "end_line": 427, "span_ids": ["TestAssertionRewrite.test_binary_op", "TestAssertionRewrite.test_boolop_percent"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_binary_op(self):\n def f():\n x = 1\n y = -1\n assert x + y\n\n assert getmsg(f) == \"assert (1 + -1)\"\n\n def f():\n assert not 5 % 4\n\n assert getmsg(f) == \"assert not (5 % 4)\"\n\n def test_boolop_percent(self):\n def f():\n assert 3 % 2 and False\n\n assert getmsg(f) == \"assert ((3 % 2) and False)\"\n\n def f():\n assert False or 4 % 2\n\n assert getmsg(f) == \"assert (False or (4 % 2))\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_at_operator_issue1290_TestAssertionRewrite.test_starred_with_side_effect.testdir_runpytest_asser": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_at_operator_issue1290_TestAssertionRewrite.test_starred_with_side_effect.testdir_runpytest_asser", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 429, "end_line": 455, "span_ids": ["TestAssertionRewrite.test_at_operator_issue1290", "TestAssertionRewrite.test_starred_with_side_effect"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n @pytest.mark.skipif(\"sys.version_info < (3,5)\")\n def test_at_operator_issue1290(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class Matrix(object):\n def __init__(self, num):\n self.num = num\n def __matmul__(self, other):\n return self.num * other.num\n\n def test_multmat_operator():\n assert Matrix(2) @ Matrix(3) == 6\"\"\"\n )\n testdir.runpytest().assert_outcomes(passed=1)\n\n @pytest.mark.skipif(\"sys.version_info < (3,5)\")\n def test_starred_with_side_effect(self, testdir):\n \"\"\"See #4412\"\"\"\n testdir.makepyfile(\n \"\"\"\\\n def test():\n f = lambda x: x\n x = iter([1, 2, 3])\n assert 2 * next(x) == f(*[next(x)])\n \"\"\"\n )\n testdir.runpytest().assert_outcomes(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_call_TestAssertionRewrite.test_call.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_call_TestAssertionRewrite.test_call.None_6", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 457, "end_line": 526, "span_ids": ["TestAssertionRewrite.test_call"], "tokens": 361}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_call(self):\n def g(a=42, *args, **kwargs):\n return False\n\n ns = {\"g\": g}\n\n def f():\n assert g()\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g()\"\"\"\n )\n\n def f():\n assert g(1)\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(1)\"\"\"\n )\n\n def f():\n assert g(1, 2)\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(1, 2)\"\"\"\n )\n\n def f():\n assert g(1, g=42)\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(1, g=42)\"\"\"\n )\n\n def f():\n assert g(1, 3, g=23)\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(1, 3, g=23)\"\"\"\n )\n\n def f():\n seq = [1, 2, 3]\n assert g(*seq)\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(*[1, 2, 3])\"\"\"\n )\n\n def f():\n x = \"a\"\n assert g(**{x: 2})\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert False\n + where False = g(**{'a': 2})\"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_attribute_TestAssertionRewrite.test_comparisons.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_attribute_TestAssertionRewrite.test_comparisons.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 583, "span_ids": ["TestAssertionRewrite.test_attribute", "TestAssertionRewrite.test_attribute.X:2", "TestAssertionRewrite.test_attribute.X", "TestAssertionRewrite.test_comparisons"], "tokens": 307}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_attribute(self):\n class X(object):\n g = 3\n\n ns = {\"x\": X}\n\n def f():\n assert not x.g # noqa\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert not 3\n + where 3 = x.g\"\"\"\n )\n\n def f():\n x.a = False # noqa\n assert x.a # noqa\n\n assert (\n getmsg(f, ns)\n == \"\"\"assert 
False\n + where False = x.a\"\"\"\n )\n\n def test_comparisons(self):\n def f():\n a, b = range(2)\n assert b < a\n\n assert getmsg(f) == \"\"\"assert 1 < 0\"\"\"\n\n def f():\n a, b, c = range(3)\n assert a > b > c\n\n assert getmsg(f) == \"\"\"assert 0 > 1\"\"\"\n\n def f():\n a, b, c = range(3)\n assert a < b > c\n\n assert getmsg(f) == \"\"\"assert 1 > 2\"\"\"\n\n def f():\n a, b, c = range(3)\n assert a < b <= c\n\n getmsg(f, must_pass=True)\n\n def f():\n a, b, c = range(3)\n assert a < b\n assert b < c\n\n getmsg(f, must_pass=True)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_len_TestAssertionRewrite.test_len.if_request_config_getopti.else_.assert_msg_assert_10_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_len_TestAssertionRewrite.test_len.if_request_config_getopti.else_.assert_msg_assert_10_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 585, "end_line": 594, "span_ids": ["TestAssertionRewrite.test_len"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_len(self, request):\n def f():\n values = list(range(10))\n assert len(values) == 11\n\n msg = getmsg(f)\n if request.config.getoption(\"verbose\") > 0:\n assert msg == \"assert 10 == 11\\n -10\\n +11\"\n else:\n assert msg == \"assert 10 == 11\\n + where 10 = len([0, 1, 2, 3, 4, 5, ...])\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_reprcompare_TestAssertionRewrite.test_custom_reprcompare.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_reprcompare_TestAssertionRewrite.test_custom_reprcompare.None_1", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 596, "end_line": 615, "span_ids": ["TestAssertionRewrite.test_custom_reprcompare"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_custom_reprcompare(self, monkeypatch):\n def my_reprcompare(op, left, right):\n return \"42\"\n\n monkeypatch.setattr(util, \"_reprcompare\", my_reprcompare)\n\n def f():\n assert 42 < 3\n\n assert getmsg(f) == \"assert 42\"\n\n def my_reprcompare(op, left, right):\n return \"{} {} 
{}\".format(left, op, right)\n\n monkeypatch.setattr(util, \"_reprcompare\", my_reprcompare)\n\n def f():\n assert 1 < 3 < 5 <= 4 < 7\n\n assert getmsg(f) == \"assert 5 <= 4\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_raising_nonzero_in_comparison_TestAssertionRewrite.test_formatchar.assert_getmsg_f_startswi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_assert_raising_nonzero_in_comparison_TestAssertionRewrite.test_formatchar.assert_getmsg_f_startswi", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 617, "end_line": 640, "span_ids": ["TestAssertionRewrite.test_assert_raising_nonzero_in_comparison.f.A.__nonzero__", "TestAssertionRewrite.test_assert_raising_nonzero_in_comparison.f.A", "TestAssertionRewrite.test_assert_raising_nonzero_in_comparison", "TestAssertionRewrite.test_formatchar"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def test_assert_raising_nonzero_in_comparison(self):\n def f():\n class A(object):\n def __nonzero__(self):\n raise ValueError(42)\n\n def __lt__(self, other):\n return A()\n\n def __repr__(self):\n return \"\"\n\n def myany(x):\n return False\n\n assert myany(A() < 0)\n\n assert \" < 0\" in getmsg(f)\n\n def test_formatchar(self):\n def f():\n assert \"%test\" == \"test\"\n\n assert getmsg(f).startswith(\"assert '%test' == 'test'\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_repr_TestAssertionRewrite.test_custom_repr_non_ascii.assert_UnicodeEncodeErro": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewrite.test_custom_repr_TestAssertionRewrite.test_custom_repr_non_ascii.assert_UnicodeEncodeErro", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 642, "end_line": 672, "span_ids": ["TestAssertionRewrite.test_custom_repr.f.Foo", "TestAssertionRewrite.test_custom_repr_non_ascii.f.A:2", "TestAssertionRewrite.test_custom_repr.f.Foo:2", "TestAssertionRewrite.test_custom_repr_non_ascii", "TestAssertionRewrite.test_custom_repr", "TestAssertionRewrite.test_custom_repr_non_ascii.f.A"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewrite(object):\n\n def 
test_custom_repr(self, request):\n def f():\n class Foo(object):\n a = 1\n\n def __repr__(self):\n return \"\\n{ \\n~ \\n}\"\n\n f = Foo()\n assert 0 == f.a\n\n lines = util._format_lines([getmsg(f)])\n if request.config.getoption(\"verbose\") > 0:\n assert lines == [\"assert 0 == 1\\n -0\\n +1\"]\n else:\n assert lines == [\"assert 0 == 1\\n + where 1 = \\\\n{ \\\\n~ \\\\n}.a\"]\n\n def test_custom_repr_non_ascii(self):\n def f():\n class A(object):\n name = u\"\u00e4\"\n\n def __repr__(self):\n return self.name.encode(\"UTF-8\") # only legal in python2\n\n a = A()\n assert not a.name\n\n msg = getmsg(f)\n assert \"UnicodeDecodeError\" not in msg\n assert \"UnicodeEncodeError\" not in msg", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport_TestRewriteOnImport.test_pycache_is_readonly.try_.finally_.cache_chmod_old_mode_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport_TestRewriteOnImport.test_pycache_is_readonly.try_.finally_.cache_chmod_old_mode_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 697, "span_ids": ["TestRewriteOnImport.test_pycache_is_a_file", "TestRewriteOnImport.test_pycache_is_readonly", "TestRewriteOnImport"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n def test_pycache_is_a_file(self, testdir):\n testdir.tmpdir.join(\"__pycache__\").write(\"Hello\")\n testdir.makepyfile(\n \"\"\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n def test_pycache_is_readonly(self, testdir):\n cache = testdir.tmpdir.mkdir(\"__pycache__\")\n old_mode = cache.stat().mode\n cache.chmod(old_mode ^ stat.S_IWRITE)\n testdir.makepyfile(\n \"\"\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n try:\n assert testdir.runpytest().ret == 0\n finally:\n cache.chmod(old_mode)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_zipfile_TestRewriteOnImport.test_zipfile.assert_testdir_runpytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_zipfile_TestRewriteOnImport.test_zipfile.assert_testdir_runpytest_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 699, "end_line": 716, "span_ids": ["TestRewriteOnImport.test_zipfile"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_zipfile(self, testdir):\n z = testdir.tmpdir.join(\"myzip.zip\")\n z_fn = str(z)\n f = zipfile.ZipFile(z_fn, \"w\")\n try:\n f.writestr(\"test_gum/__init__.py\", \"\")\n f.writestr(\"test_gum/test_lizard.py\", \"\")\n finally:\n f.close()\n z.chmod(256)\n testdir.makepyfile(\n \"\"\"\n import sys\n sys.path.append(%r)\n import test_gum.test_lizard\"\"\"\n % (z_fn,)\n )\n assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_readonly_TestRewriteOnImport.test_dont_write_bytecode.assert_testdir_runpytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_readonly_TestRewriteOnImport.test_dont_write_bytecode.assert_testdir_runpytest_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 718, "end_line": 744, "span_ids": ["TestRewriteOnImport.test_readonly", "TestRewriteOnImport.test_dont_write_bytecode"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_readonly(self, testdir):\n sub = testdir.mkdir(\"testing\")\n sub.join(\"test_readonly.py\").write(\n b\"\"\"\ndef test_rewritten():\n assert \"@py_builtins\" in globals()\n \"\"\",\n \"wb\",\n )\n old_mode = sub.stat().mode\n sub.chmod(320)\n try:\n assert testdir.runpytest().ret == 0\n finally:\n sub.chmod(old_mode)\n\n def test_dont_write_bytecode(self, testdir, monkeypatch):\n testdir.makepyfile(\n \"\"\"\n import os\n def test_no_bytecode():\n assert \"__pycache__\" in __cached__\n assert not os.path.exists(__cached__)\n assert not os.path.exists(os.path.dirname(__cached__))\"\"\"\n )\n monkeypatch.setenv(\"PYTHONDONTWRITEBYTECODE\", \"1\")\n assert testdir.runpytest_subprocess().ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_orphaned_pyc_file_TestRewriteOnImport.test_orphaned_pyc_file.assert_testdir_runpytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_orphaned_pyc_file_TestRewriteOnImport.test_orphaned_pyc_file.assert_testdir_runpytest_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 746, "end_line": 773, "span_ids": ["TestRewriteOnImport.test_orphaned_pyc_file"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_orphaned_pyc_file(self, testdir):\n if sys.version_info < (3, 0) and hasattr(sys, \"pypy_version_info\"):\n pytest.skip(\"pypy2 doesn't run orphaned pyc files\")\n\n testdir.makepyfile(\n \"\"\"\n import orphan\n def test_it():\n assert orphan.value == 17\n \"\"\"\n )\n testdir.makepyfile(\n orphan=\"\"\"\n value = 17\n \"\"\"\n )\n py_compile.compile(\"orphan.py\")\n os.remove(\"orphan.py\")\n\n # Python 3 puts the .pyc files in a __pycache__ directory, and will\n # not import from there without source. It will import a .pyc from\n # the source location though.\n if not os.path.exists(\"orphan.pyc\"):\n pycs = glob.glob(\"__pycache__/orphan.*.pyc\")\n assert len(pycs) == 1\n os.rename(pycs[0], \"orphan.pyc\")\n\n assert testdir.runpytest().ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_pyc_vs_pyo_TestRewriteOnImport.test_pyc_vs_pyo.assert_tagged_pyc_in": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_pyc_vs_pyo_TestRewriteOnImport.test_pyc_vs_pyo.assert_tagged_pyc_in", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 775, "end_line": 796, "span_ids": ["TestRewriteOnImport.test_pyc_vs_pyo"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n @pytest.mark.skipif('\"__pypy__\" in sys.modules')\n def test_pyc_vs_pyo(self, testdir, monkeypatch):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_optimized():\n \"hello\"\n assert test_optimized.__doc__ is None\"\"\"\n )\n p = py.path.local.make_numbered_dir(\n prefix=\"runpytest-\", keep=None, rootdir=testdir.tmpdir\n )\n tmp = \"--basetemp=%s\" % p\n monkeypatch.setenv(\"PYTHONOPTIMIZE\", \"2\")\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", raising=False)\n assert testdir.runpytest_subprocess(tmp).ret == 0\n tagged = \"test_pyc_vs_pyo.\" + PYTEST_TAG\n assert tagged + \".pyo\" in os.listdir(\"__pycache__\")\n monkeypatch.undo()\n monkeypatch.delenv(\"PYTHONDONTWRITEBYTECODE\", raising=False)\n assert testdir.runpytest_subprocess(tmp).ret == 1\n assert tagged + \".pyc\" in os.listdir(\"__pycache__\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_package_TestRewriteOnImport.test_rewrite_module_imported_from_conftest.assert_testdir_runpytest_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_package_TestRewriteOnImport.test_rewrite_module_imported_from_conftest.assert_testdir_runpytest_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 798, "end_line": 848, "span_ids": ["TestRewriteOnImport.test_translate_newlines", "TestRewriteOnImport.test_rewrite_warning", "TestRewriteOnImport.test_package_without__init__py", "TestRewriteOnImport.test_package", "TestRewriteOnImport.test_rewrite_module_imported_from_conftest"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_package(self, testdir):\n pkg = testdir.tmpdir.join(\"pkg\")\n pkg.mkdir()\n pkg.join(\"__init__.py\").ensure()\n pkg.join(\"test_blah.py\").write(\n \"\"\"\ndef test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n def test_translate_newlines(self, testdir):\n content = \"def test_rewritten():\\r\\n assert '@py_builtins' in globals()\"\n b = content.encode(\"utf-8\")\n testdir.tmpdir.join(\"test_newlines.py\").write(b, \"wb\")\n assert testdir.runpytest().ret == 0\n\n @pytest.mark.skipif(\n sys.version_info < (3, 4),\n reason=\"packages without __init__.py not supported on python 2\",\n )\n def test_package_without__init__py(self, testdir):\n pkg = testdir.mkdir(\"a_package_without_init_py\")\n pkg.join(\"module.py\").ensure()\n testdir.makepyfile(\"import a_package_without_init_py.module\")\n assert testdir.runpytest().ret == EXIT_NOTESTSCOLLECTED\n\n def test_rewrite_warning(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n pytest.register_assert_rewrite(\"_pytest\")\n \"\"\"\n )\n # needs to be a subprocess because pytester explicitly disables this warning\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*Module already imported*: _pytest\"])\n\n def test_rewrite_module_imported_from_conftest(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import test_rewrite_module_imported\n \"\"\"\n )\n testdir.makepyfile(\n test_rewrite_module_imported=\"\"\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\n \"\"\"\n )\n assert testdir.runpytest_subprocess().ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_remember_rewritten_modules_TestRewriteOnImport.test_remember_rewritten_modules.assert_warnings_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_remember_rewritten_modules_TestRewriteOnImport.test_remember_rewritten_modules.assert_warnings_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 850, "end_line": 866, "span_ids": ["TestRewriteOnImport.test_remember_rewritten_modules"], "tokens": 172}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch):\n \"\"\"\n AssertionRewriteHook should remember rewritten modules so it\n doesn't give false positives (#2005).\n \"\"\"\n monkeypatch.syspath_prepend(testdir.tmpdir)\n testdir.makepyfile(test_remember_rewritten_modules=\"\")\n warnings = []\n hook = AssertionRewritingHook(pytestconfig)\n monkeypatch.setattr(\n hook, \"_warn_already_imported\", lambda code, msg: warnings.append(msg)\n )\n hook.find_module(\"test_remember_rewritten_modules\")\n hook.load_module(\"test_remember_rewritten_modules\")\n hook.mark_rewrite(\"test_remember_rewritten_modules\")\n hook.mark_rewrite(\"test_remember_rewritten_modules\")\n assert warnings == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins.assert_pytest_warning_su": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins.assert_pytest_warning_su", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 868, "end_line": 881, "span_ids": ["TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_rewrite_warning_using_pytest_plugins(self, testdir):\n testdir.makepyfile(\n **{\n \"conftest.py\": \"pytest_plugins = ['core', 'gui', 'sci']\",\n \"core.py\": \"\",\n \"gui.py\": \"pytest_plugins = ['core', 'sci']\",\n \"sci.py\": \"pytest_plugins = ['core']\",\n \"test_rewrite_warning_pytest_plugins.py\": \"def test(): pass\",\n }\n )\n testdir.chdir()\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*= 1 passed in *=*\"])\n assert \"pytest-warning summary\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var.assert_pytest_warning_su": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var_TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var.assert_pytest_warning_su", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 883, "end_line": 899, "span_ids": ["TestRewriteOnImport.test_rewrite_warning_using_pytest_plugins_env_var"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):\n monkeypatch.setenv(\"PYTEST_PLUGINS\", \"plugin\")\n testdir.makepyfile(\n **{\n \"plugin.py\": \"\",\n \"test_rewrite_warning_using_pytest_plugins_env_var.py\": \"\"\"\n import plugin\n pytest_plugins = ['plugin']\n def test():\n pass\n \"\"\",\n }\n )\n testdir.chdir()\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*= 1 passed in *=*\"])\n assert \"pytest-warning summary\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_future_imports_TestRewriteOnImport.test_rewrite_future_imports.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestRewriteOnImport.test_rewrite_future_imports_TestRewriteOnImport.test_rewrite_future_imports.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 901, "end_line": 919, "span_ids": ["TestRewriteOnImport.test_rewrite_future_imports"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRewriteOnImport(object):\n\n @pytest.mark.skipif(sys.version_info[0] > 2, reason=\"python 2 only\")\n def test_rewrite_future_imports(self, testdir):\n \"\"\"Test that rewritten modules don't inherit the __future__ flags\n from the assertrewrite module.\n\n assertion.rewrite imports __future__.division (and others), so\n ensure rewritten modules don't inherit those flags.\n\n The test below will fail if __future__.division is enabled\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n def test():\n x = 1 / 2\n assert type(x) is int\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails_TestAssertionRewriteHookDetails.test_loader_is_package_true_for_package.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails_TestAssertionRewriteHookDetails.test_loader_is_package_true_for_package.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 922, "end_line": 948, "span_ids": ["TestAssertionRewriteHookDetails.test_loader_is_package_false_for_module", "TestAssertionRewriteHookDetails", "TestAssertionRewriteHookDetails.test_loader_is_package_true_for_package"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n def test_loader_is_package_false_for_module(self, testdir):\n testdir.makepyfile(\n test_fun=\"\"\"\n def test_loader():\n assert not __loader__.is_package(__name__)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 passed*\"])\n\n def test_loader_is_package_true_for_package(self, testdir):\n testdir.makepyfile(\n test_fun=\"\"\"\n def test_loader():\n assert not __loader__.is_package(__name__)\n\n def test_fun():\n assert __loader__.is_package('fun')\n\n def test_missing():\n assert not __loader__.is_package('pytest_not_there')\n \"\"\"\n )\n testdir.mkpydir(\"fun\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_assume_ascii_TestAssertionRewriteHookDetails.test_assume_ascii.assert_SyntaxError_Non_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_assume_ascii_TestAssertionRewriteHookDetails.test_assume_ascii.assert_SyntaxError_Non_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 950, "end_line": 957, "span_ids": ["TestAssertionRewriteHookDetails.test_assume_ascii"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] >= 3\")\n @pytest.mark.xfail(\"hasattr(sys, 'pypy_translation_info')\")\n def test_assume_ascii(self, testdir):\n content = \"u'\\xe2\\x99\\xa5\\x01\\xfe'\"\n testdir.tmpdir.join(\"test_encoding.py\").write(content, \"wb\")\n res = testdir.runpytest()\n assert res.ret != 0\n assert \"SyntaxError: Non-ASCII character\" in 
res.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_detect_coding_cookie_TestAssertionRewriteHookDetails.test_sys_meta_path_munged.assert_testdir_runpytest_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_detect_coding_cookie_TestAssertionRewriteHookDetails.test_sys_meta_path_munged.assert_testdir_runpytest_", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 959, "end_line": 998, "span_ids": ["TestAssertionRewriteHookDetails.test_detect_coding_cookie_second_line", "TestAssertionRewriteHookDetails.test_detect_coding_cookie_crlf", "TestAssertionRewriteHookDetails.test_sys_meta_path_munged", "TestAssertionRewriteHookDetails.test_detect_coding_cookie"], "tokens": 342}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] >= 3\")\n def test_detect_coding_cookie(self, testdir):\n testdir.makepyfile(\n test_cookie=\"\"\"\n # -*- coding: utf-8 -*-\n u\"St\\xc3\\xa4d\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n @pytest.mark.skipif(\"sys.version_info[0] >= 3\")\n def test_detect_coding_cookie_second_line(self, testdir):\n testdir.makepyfile(\n test_cookie=\"\"\"\n # -*- coding: utf-8 -*-\n u\"St\\xc3\\xa4d\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n @pytest.mark.skipif(\"sys.version_info[0] >= 3\")\n def test_detect_coding_cookie_crlf(self, testdir):\n testdir.makepyfile(\n test_cookie=\"\"\"\n # -*- coding: utf-8 -*-\n u\"St\\xc3\\xa4d\"\n def test_rewritten():\n assert \"@py_builtins\" in globals()\"\"\"\n )\n assert testdir.runpytest().ret == 0\n\n def test_sys_meta_path_munged(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_meta_path():\n import sys; sys.meta_path = []\"\"\"\n )\n assert testdir.runpytest().ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_write_pyc_TestAssertionRewriteHookDetails.test_write_pyc.assert_not__write_pyc_sta": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_write_pyc_TestAssertionRewriteHookDetails.test_write_pyc.assert_not__write_pyc_sta", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1000, "end_line": 1020, "span_ids": ["TestAssertionRewriteHookDetails.test_write_pyc"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_write_pyc(self, testdir, tmpdir, monkeypatch):\n from _pytest.assertion.rewrite import _write_pyc\n from _pytest.assertion import AssertionState\n import atomicwrites\n from contextlib import contextmanager\n\n config = testdir.parseconfig([])\n state = AssertionState(config, \"rewrite\")\n source_path = tmpdir.ensure(\"source.py\")\n pycpath = tmpdir.join(\"pyc\").strpath\n assert _write_pyc(state, [1], source_path.stat(), pycpath)\n\n @contextmanager\n def atomic_write_failed(fn, mode=\"r\", overwrite=False):\n e = IOError()\n e.errno = 10\n raise e\n yield\n\n monkeypatch.setattr(atomicwrites, \"atomic_write\", atomic_write_failed)\n assert not _write_pyc(state, [1], source_path.stat(), pycpath)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_resources_provider_for_loader_TestAssertionRewriteHookDetails.test_resources_provider_for_loader.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_resources_provider_for_loader_TestAssertionRewriteHookDetails.test_resources_provider_for_loader.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1022, "end_line": 1050, "span_ids": ["TestAssertionRewriteHookDetails.test_resources_provider_for_loader"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_resources_provider_for_loader(self, testdir):\n \"\"\"\n Attempts to load resources from a package should succeed normally,\n even when the AssertionRewriteHook is used to load the modules.\n\n See #366 for details.\n \"\"\"\n pytest.importorskip(\"pkg_resources\")\n\n testdir.mkpydir(\"testpkg\")\n contents = {\n \"testpkg/test_pkg\": \"\"\"\n import pkg_resources\n\n import pytest\n from _pytest.assertion.rewrite import AssertionRewritingHook\n\n def test_load_resource():\n assert isinstance(__loader__, AssertionRewritingHook)\n res = pkg_resources.resource_string(__name__, 'resource.txt')\n res = res.decode('ascii')\n assert res == 'Load me please.'\n \"\"\"\n }\n testdir.makepyfile(**contents)\n testdir.maketxtfile(**{\"testpkg/resource\": \"Load me please.\"})\n\n result = testdir.runpytest_subprocess()\n result.assert_outcomes(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_read_pyc_TestAssertionRewriteHookDetails.test_read_pyc._no_error": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_read_pyc_TestAssertionRewriteHookDetails.test_read_pyc._no_error", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1052, "end_line": 1072, "span_ids": ["TestAssertionRewriteHookDetails.test_read_pyc"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_read_pyc(self, tmpdir):\n \"\"\"\n Ensure that the `_read_pyc` can properly deal with corrupted pyc files.\n In those circumstances it should just give up instead of generating\n an exception that is propagated to the caller.\n \"\"\"\n import py_compile\n from _pytest.assertion.rewrite import _read_pyc\n\n source = tmpdir.join(\"source.py\")\n pyc = source + \"c\"\n\n source.write(\"def test(): pass\")\n py_compile.compile(str(source), str(pyc))\n\n contents = pyc.read(mode=\"rb\")\n strip_bytes = 20 # header is around 8 bytes, strip a little more\n assert len(contents) > strip_bytes\n pyc.write(contents[:strip_bytes], mode=\"wb\")\n\n assert _read_pyc(source, str(pyc)) is None # no error", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_is_same_TestAssertionRewriteHookDetails.test_reload_is_same.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_is_same_TestAssertionRewriteHookDetails.test_reload_is_same.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1074, "end_line": 1100, "span_ids": ["TestAssertionRewriteHookDetails.test_reload_is_same"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_reload_is_same(self, testdir):\n # A file that will be picked up during collecting.\n testdir.tmpdir.join(\"file.py\").ensure()\n testdir.tmpdir.join(\"pytest.ini\").write(\n textwrap.dedent(\n \"\"\"\n [pytest]\n python_files = *.py\n \"\"\"\n )\n )\n\n testdir.makepyfile(\n test_fun=\"\"\"\n import sys\n try:\n from imp import reload\n except ImportError:\n pass\n\n def test_loader():\n import file\n assert sys.modules[\"file\"] is reload(file)\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n 
result.stdout.fnmatch_lines([\"* 1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_reloads_TestAssertionRewriteHookDetails.test_reload_reloads.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_reload_reloads_TestAssertionRewriteHookDetails.test_reload_reloads.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1102, "end_line": 1142, "span_ids": ["TestAssertionRewriteHookDetails.test_reload_reloads"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_reload_reloads(self, testdir):\n \"\"\"Reloading a module after change picks up the change.\"\"\"\n testdir.tmpdir.join(\"file.py\").write(\n textwrap.dedent(\n \"\"\"\n def reloaded():\n return False\n\n def rewrite_self():\n with open(__file__, 'w') as self:\n self.write('def reloaded(): return True')\n \"\"\"\n )\n )\n testdir.tmpdir.join(\"pytest.ini\").write(\n textwrap.dedent(\n \"\"\"\n [pytest]\n python_files = *.py\n \"\"\"\n )\n )\n\n testdir.makepyfile(\n test_fun=\"\"\"\n import sys\n try:\n from imp import reload\n except ImportError:\n pass\n\n def test_loader():\n import file\n assert not file.reloaded()\n file.rewrite_self()\n reload(file)\n assert file.reloaded()\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n result.stdout.fnmatch_lines([\"* 1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_get_data_support_TestAssertionRewriteHookDetails.test_get_data_support.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestAssertionRewriteHookDetails.test_get_data_support_TestAssertionRewriteHookDetails.test_get_data_support.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1144, "end_line": 1161, "span_ids": ["TestAssertionRewriteHookDetails.test_get_data_support"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionRewriteHookDetails(object):\n\n def test_get_data_support(self, testdir):\n \"\"\"Implement optional PEP302 api (#808).\n \"\"\"\n path = testdir.mkpydir(\"foo\")\n 
path.join(\"test_foo.py\").write(\n textwrap.dedent(\n \"\"\"\\\n class Test(object):\n def test_foo(self):\n import pkgutil\n data = pkgutil.get_data('foo.test_foo', 'data.txt')\n assert data == b'Hey'\n \"\"\"\n )\n )\n path.join(\"data.txt\").write(\"Hey\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_issue731_test_issue731.assert_unbalanced_braces": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_issue731_test_issue731.assert_unbalanced_braces", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1164, "end_line": 1180, "span_ids": ["test_issue731"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_issue731(testdir):\n testdir.makepyfile(\n \"\"\"\n class LongReprWithBraces(object):\n def __repr__(self):\n return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'\n\n def some_method(self):\n return False\n\n def test_long_repr():\n obj = LongReprWithBraces()\n assert obj.some_method()\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"unbalanced braces\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue925_TestIssue925.test_many_brackets.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue925_TestIssue925.test_many_brackets.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1183, "end_line": 1212, "span_ids": ["TestIssue925.test_simple_case", "TestIssue925", "TestIssue925.test_many_brackets", "TestIssue925.test_long_case"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestIssue925(object):\n def test_simple_case(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_ternary_display():\n assert (False == False) == False\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*E*assert (False == False) == False\"])\n\n def test_long_case(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_ternary_display():\n assert False == (False == True) == True\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*E*assert (False == True) == True\"])\n\n def test_many_brackets(self, testdir):\n 
testdir.makepyfile(\n \"\"\"\n def test_ternary_display():\n assert True == ((False == True) == True)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*E*assert True == ((False == True) == True)\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue2121_TestIssue2121.test_rewrite_python_files_contain_subdirs.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_TestIssue2121_TestIssue2121.test_rewrite_python_files_contain_subdirs.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1215, "end_line": 1232, "span_ids": ["TestIssue2121", "TestIssue2121.test_rewrite_python_files_contain_subdirs"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestIssue2121:\n def test_rewrite_python_files_contain_subdirs(self, testdir):\n testdir.makepyfile(\n **{\n \"tests/file.py\": \"\"\"\n def test_simple_failure():\n assert 1 + 1 == 3\n \"\"\"\n }\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n python_files = tests/**.py\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*E*assert (1 + 1) == 3\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_source_mtime_long_long_test_source_mtime_long_long.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_assertrewrite.py_test_source_mtime_long_long_test_source_mtime_long_long.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_assertrewrite.py", "file_name": "test_assertrewrite.py", "file_type": "text/x-python", "category": "test", "start_line": 1235, "end_line": 1255, "span_ids": ["test_source_mtime_long_long"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"offset\", [-1, +1])\ndef test_source_mtime_long_long(testdir, offset):\n \"\"\"Support modification dates after 2038 in rewritten files (#4903).\n\n pytest would crash with:\n\n fp.write(struct.pack(\"= (3, 0):\n obj = \"'b\\u00f6y'\"\n else:\n obj = \"u'\\u00f6y'\"\n testdir.makepyfile(\n \"\"\"\n # coding=utf8\n # taken from issue 227 from nosetests\n def test_unicode():\n import sys\n print(sys.stdout)\n print(%s)\n \"\"\"\n % obj\n )\n result = testdir.runpytest(\"--capture=%s\" % method)\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_bytes_in_utf8_encoding_test_collect_capturing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_bytes_in_utf8_encoding_test_collect_capturing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 152, "span_ids": ["test_capturing_bytes_in_utf8_encoding", "test_collect_capturing"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"fd\", \"sys\"])\ndef test_capturing_bytes_in_utf8_encoding(testdir, method):\n testdir.makepyfile(\n \"\"\"\n def test_unicode():\n print('b\\\\u00f6y')\n \"\"\"\n )\n result = testdir.runpytest(\"--capture=%s\" % method)\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n\ndef test_collect_capturing(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import sys\n\n print(\"collect %s failure\" % 13)\n sys.stderr.write(\"collect %s_stderr failure\" % 13)\n import xyz42123\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*Captured stdout*\",\n \"collect 13 failure\",\n \"*Captured stderr*\",\n \"collect 13_stderr failure\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing_TestPerTestCapturing.test_capture_and_fixtures.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing_TestPerTestCapturing.test_capture_and_fixtures.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 180, "span_ids": ["TestPerTestCapturing.test_capture_and_fixtures", "TestPerTestCapturing"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPerTestCapturing(object):\n def test_capture_and_fixtures(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def setup_module(mod):\n print(\"setup module\")\n def setup_function(function):\n print(\"setup \" + function.__name__)\n def test_func1():\n print(\"in func1\")\n assert 0\n def test_func2():\n print(\"in func2\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"setup module*\",\n \"setup test_func1*\",\n \"in func1*\",\n \"setup test_func2*\",\n \"in func2*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capture_scope_cache_TestPerTestCapturing.test_capture_scope_cache.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capture_scope_cache_TestPerTestCapturing.test_capture_scope_cache.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 182, "end_line": 208, "span_ids": ["TestPerTestCapturing.test_capture_scope_cache"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPerTestCapturing(object):\n\n @pytest.mark.xfail(reason=\"unimplemented feature\")\n def test_capture_scope_cache(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import sys\n def setup_module(func):\n print(\"module-setup\")\n def setup_function(func):\n print(\"function-setup\")\n def test_func():\n print(\"in function\")\n assert 0\n def teardown_function(func):\n print(\"in teardown\")\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*test_func():*\",\n \"*Captured stdout during setup*\",\n \"module-setup*\",\n \"function-setup*\",\n \"*Captured stdout*\",\n \"in teardown*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_no_carry_over_TestPerTestCapturing.test_teardown_capturing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_no_carry_over_TestPerTestCapturing.test_teardown_capturing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 210, "end_line": 248, "span_ids": ["TestPerTestCapturing.test_teardown_capturing", "TestPerTestCapturing.test_no_carry_over"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPerTestCapturing(object):\n\n def test_no_carry_over(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_func1():\n print(\"in func1\")\n def test_func2():\n print(\"in func2\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n s = result.stdout.str()\n assert \"in func1\" not in s\n assert \"in func2\" in s\n\n def test_teardown_capturing(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def setup_function(function):\n print(\"setup func1\")\n def teardown_function(function):\n print(\"teardown func1\")\n assert 0\n def test_func1():\n print(\"in func1\")\n 
pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*teardown_function*\",\n \"*Captured stdout*\",\n \"setup func1*\",\n \"in func1*\",\n \"teardown func1*\",\n # \"*1 fixture failure*\"\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_teardown_capturing_final_TestPerTestCapturing.test_teardown_capturing_final.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_teardown_capturing_final_TestPerTestCapturing.test_teardown_capturing_final.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 268, "span_ids": ["TestPerTestCapturing.test_teardown_capturing_final"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPerTestCapturing(object):\n\n def test_teardown_capturing_final(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def teardown_module(mod):\n print(\"teardown module\")\n assert 0\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*def teardown_module(mod):*\",\n \"*Captured stdout*\",\n \"*teardown module*\",\n \"*1 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capturing_outerr_TestPerTestCapturing.test_capturing_outerr.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestPerTestCapturing.test_capturing_outerr_TestPerTestCapturing.test_capturing_outerr.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 270, "end_line": 295, "span_ids": ["TestPerTestCapturing.test_capturing_outerr"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPerTestCapturing(object):\n\n def test_capturing_outerr(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\\\n import sys\n def test_capturing():\n print(42)\n sys.stderr.write(str(23))\n def test_capturing_error():\n print(1)\n sys.stderr.write(str(2))\n raise ValueError\n \"\"\"\n )\n result = testdir.runpytest(p1)\n result.stdout.fnmatch_lines(\n [\n \"*test_capturing_outerr.py .F*\",\n \"====* FAILURES *====\",\n \"____*____\",\n \"*test_capturing_outerr.py:8: ValueError\",\n \"*--- Captured stdout *call*\",\n \"1\",\n \"*--- 
Captured stderr *call*\",\n \"2\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction_TestLoggingInteraction.test_logging_and_immediate_setupteardown.for_optargs_in_captu.assert_closed_not_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction_TestLoggingInteraction.test_logging_and_immediate_setupteardown.for_optargs_in_captu.assert_closed_not_in_s", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 298, "end_line": 337, "span_ids": ["TestLoggingInteraction.test_logging_and_immediate_setupteardown", "TestLoggingInteraction.test_logging_stream_ownership", "TestLoggingInteraction"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLoggingInteraction(object):\n def test_logging_stream_ownership(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_logging():\n import logging\n import pytest\n stream = capture.CaptureIO()\n logging.basicConfig(stream=stream)\n stream.close() # to free memory/release resources\n \"\"\"\n )\n result = testdir.runpytest_subprocess(p)\n assert result.stderr.str().find(\"atexit\") == -1\n\n def test_logging_and_immediate_setupteardown(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n import logging\n def setup_function(function):\n logging.warning(\"hello1\")\n\n def test_logging():\n logging.warning(\"hello2\")\n assert 0\n\n def teardown_function(function):\n logging.warning(\"hello3\")\n assert 0\n \"\"\"\n )\n for optargs in ((\"--capture=sys\",), (\"--capture=fd\",)):\n print(optargs)\n result = testdir.runpytest_subprocess(p, *optargs)\n s = result.stdout.str()\n result.stdout.fnmatch_lines(\n [\"*WARN*hello3\", \"*WARN*hello1\", \"*WARN*hello2\"] # errors show first!\n )\n # verify proper termination\n assert \"closed\" not in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_and_crossscope_fixtures_TestLoggingInteraction.test_logging_and_crossscope_fixtures.for_optargs_in_captu.assert_closed_not_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_and_crossscope_fixtures_TestLoggingInteraction.test_logging_and_crossscope_fixtures.for_optargs_in_captu.assert_closed_not_in_s", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 339, "end_line": 363, "span_ids": ["TestLoggingInteraction.test_logging_and_crossscope_fixtures"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLoggingInteraction(object):\n\n def test_logging_and_crossscope_fixtures(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n import logging\n def setup_module(function):\n logging.warning(\"hello1\")\n\n def test_logging():\n logging.warning(\"hello2\")\n assert 0\n\n def teardown_module(function):\n logging.warning(\"hello3\")\n assert 0\n \"\"\"\n )\n for optargs in ((\"--capture=sys\",), (\"--capture=fd\",)):\n print(optargs)\n result = testdir.runpytest_subprocess(p, *optargs)\n s = result.stdout.str()\n result.stdout.fnmatch_lines(\n [\"*WARN*hello3\", \"*WARN*hello1\", \"*WARN*hello2\"] # errors come first\n )\n # verify proper termination\n assert \"closed\" not in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_is_shown_TestLoggingInteraction.test_conftestlogging_is_shown.assert_operation_on_clos": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_is_shown_TestLoggingInteraction.test_conftestlogging_is_shown.assert_operation_on_clos", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 365, "end_line": 377, "span_ids": ["TestLoggingInteraction.test_conftestlogging_is_shown"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLoggingInteraction(object):\n\n def test_conftestlogging_is_shown(self, testdir):\n testdir.makeconftest(\n \"\"\"\\\n import logging\n logging.basicConfig()\n logging.warning(\"hello435\")\n \"\"\"\n )\n # make sure that logging is still captured in tests\n result = testdir.runpytest_subprocess(\"-s\", \"-p\", \"no:capturelog\")\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stderr.fnmatch_lines([\"WARNING*hello435*\"])\n assert \"operation on closed file\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_and_test_logging_TestLoggingInteraction.test_conftestlogging_and_test_logging.assert_operation_on_clos": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_conftestlogging_and_test_logging_TestLoggingInteraction.test_conftestlogging_and_test_logging.assert_operation_on_clos", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 379, "end_line": 399, "span_ids": ["TestLoggingInteraction.test_conftestlogging_and_test_logging"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLoggingInteraction(object):\n\n def test_conftestlogging_and_test_logging(self, testdir):\n testdir.makeconftest(\n \"\"\"\\\n import logging\n logging.basicConfig()\n \"\"\"\n )\n # make sure that logging is still captured in tests\n p = testdir.makepyfile(\n \"\"\"\\\n def test_hello():\n import logging\n logging.warning(\"hello433\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest_subprocess(p, \"-p\", \"no:capturelog\")\n assert result.ret != 0\n result.stdout.fnmatch_lines([\"WARNING*hello433*\"])\n assert \"something\" not in result.stderr.str()\n assert \"operation on closed file\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_after_cap_stopped_TestLoggingInteraction.test_logging_after_cap_stopped.assert_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestLoggingInteraction.test_logging_after_cap_stopped_TestLoggingInteraction.test_logging_after_cap_stopped.assert_", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 401, "end_line": 433, "span_ids": ["TestLoggingInteraction.test_logging_after_cap_stopped"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLoggingInteraction(object):\n\n def test_logging_after_cap_stopped(self, testdir):\n testdir.makeconftest(\n \"\"\"\\\n import pytest\n import logging\n\n log = logging.getLogger(__name__)\n\n @pytest.fixture\n def log_on_teardown():\n yield\n log.warning('Logging on teardown')\n \"\"\"\n )\n # make sure that logging is still captured in tests\n p = testdir.makepyfile(\n \"\"\"\\\n def test_hello(log_on_teardown):\n import logging\n logging.warning(\"hello433\")\n assert 1\n raise KeyboardInterrupt()\n \"\"\"\n )\n result = testdir.runpytest_subprocess(p, \"--log-cli-level\", \"info\")\n assert result.ret != 0\n result.stdout.fnmatch_lines(\n [\"*WARNING*hello433*\", \"*WARNING*Logging on teardown*\"]\n )\n assert (\n \"AttributeError: 'NoneType' object has no attribute 'resume_capturing'\"\n not in result.stderr.str()\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture_TestCaptureFixture.test_capsyscapfd.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture_TestCaptureFixture.test_capsyscapfd.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": 
"text/x-python", "category": "test", "start_line": 436, "end_line": 468, "span_ids": ["TestCaptureFixture", "TestCaptureFixture.test_capsyscapfd", "TestCaptureFixture.test_std_functional"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n @pytest.mark.parametrize(\"opt\", [[], [\"-s\"]])\n def test_std_functional(self, testdir, opt):\n reprec = testdir.inline_runsource(\n \"\"\"\\\n def test_hello(capsys):\n print(42)\n out, err = capsys.readouterr()\n assert out.startswith(\"42\")\n \"\"\",\n *opt\n )\n reprec.assertoutcome(passed=1)\n\n def test_capsyscapfd(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_one(capsys, capfd):\n pass\n def test_two(capfd, capsys):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*ERROR*setup*test_one*\",\n \"E*capfd*capsys*same*time*\",\n \"*ERROR*setup*test_two*\",\n \"E*capsys*capfd*same*time*\",\n \"*2 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capturing_getfixturevalue_TestCaptureFixture.test_capturing_getfixturevalue.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capturing_getfixturevalue_TestCaptureFixture.test_capturing_getfixturevalue.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 470, "end_line": 491, "span_ids": ["TestCaptureFixture.test_capturing_getfixturevalue"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n def test_capturing_getfixturevalue(self, testdir):\n \"\"\"Test that asking for \"capfd\" and \"capsys\" using request.getfixturevalue\n in the same test is an error.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\\\n def test_one(capsys, request):\n request.getfixturevalue(\"capfd\")\n def test_two(capfd, request):\n request.getfixturevalue(\"capsys\")\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*test_one*\",\n \"*capsys*capfd*same*time*\",\n \"*test_two*\",\n \"*capfd*capsys*same*time*\",\n \"*2 failed in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsyscapfdbinary_TestCaptureFixture.test_stdfd_functional.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsyscapfdbinary_TestCaptureFixture.test_stdfd_functional.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 493, "end_line": 531, "span_ids": ["TestCaptureFixture.test_capsyscapfdbinary", "TestCaptureFixture.test_capture_is_represented_on_failure_issue128", "TestCaptureFixture.test_stdfd_functional"], "tokens": 283}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n def test_capsyscapfdbinary(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_one(capsys, capfdbinary):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\"*ERROR*setup*test_one*\", \"E*capfdbinary*capsys*same*time*\", \"*1 error*\"]\n )\n\n @pytest.mark.parametrize(\"method\", [\"sys\", \"fd\"])\n def test_capture_is_represented_on_failure_issue128(self, testdir, method):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_hello(cap{}):\n print(\"xxx42xxx\")\n assert 0\n \"\"\".format(\n method\n )\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"xxx42xxx\"])\n\n @needsosdup\n def test_stdfd_functional(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\\\n def test_hello(capfd):\n import os\n os.write(1, \"42\".encode('ascii'))\n out, err = capfd.readouterr()\n assert out.startswith(\"42\")\n capfd.close()\n \"\"\"\n )\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capfdbinary_TestCaptureFixture.test_capfdbinary.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capfdbinary_TestCaptureFixture.test_capfdbinary.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 546, "span_ids": ["TestCaptureFixture.test_capfdbinary"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @needsosdup\n def test_capfdbinary(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\\\n def test_hello(capfdbinary):\n import os\n # some likely un-decodable bytes\n os.write(1, b'\\\\xfe\\\\x98\\\\x20')\n out, err = capfdbinary.readouterr()\n assert out == b'\\\\xfe\\\\x98\\\\x20'\n assert err == b''\n \"\"\"\n )\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_TestCaptureFixture.test_capsysbinary.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_TestCaptureFixture.test_capsysbinary.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 548, "end_line": 563, "span_ids": ["TestCaptureFixture.test_capsysbinary"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @pytest.mark.skipif(\n sys.version_info < (3,), reason=\"only have capsysbinary in python 3\"\n )\n def test_capsysbinary(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\\\n def test_hello(capsysbinary):\n import sys\n # some likely un-decodable bytes\n sys.stdout.buffer.write(b'\\\\xfe\\\\x98\\\\x20')\n out, err = capsysbinary.readouterr()\n assert out == b'\\\\xfe\\\\x98\\\\x20'\n assert err == b''\n \"\"\"\n )\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_forbidden_in_python2_TestCaptureFixture.test_capsysbinary_forbidden_in_python2.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_capsysbinary_forbidden_in_python2_TestCaptureFixture.test_capsysbinary_forbidden_in_python2.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 565, "end_line": 582, "span_ids": ["TestCaptureFixture.test_capsysbinary_forbidden_in_python2"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @pytest.mark.skipif(\n sys.version_info >= (3,), reason=\"only have capsysbinary in python 3\"\n )\n def test_capsysbinary_forbidden_in_python2(self, testdir):\n testdir.makepyfile(\n \"\"\"\\\n def test_hello(capsysbinary):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*test_hello*\",\n \"*capsysbinary is only supported on Python 3*\",\n \"*1 error in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_partial_setup_failure_TestCaptureFixture.test_capture_and_logging.assert_closed_not_in_re": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_partial_setup_failure_TestCaptureFixture.test_capture_and_logging.assert_closed_not_in_re", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 584, "end_line": 618, "span_ids": ["TestCaptureFixture.test_partial_setup_failure", "TestCaptureFixture.test_keyboardinterrupt_disables_capturing", "TestCaptureFixture.test_capture_and_logging"], "tokens": 246}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n def test_partial_setup_failure(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_hello(capsys, missingarg):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*test_partial_setup_failure*\", \"*1 error*\"])\n\n @needsosdup\n def test_keyboardinterrupt_disables_capturing(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n def test_hello(capfd):\n import os\n os.write(1, str(42).encode('ascii'))\n raise KeyboardInterrupt()\n \"\"\"\n )\n result = testdir.runpytest_subprocess(p)\n result.stdout.fnmatch_lines([\"*KeyboardInterrupt*\"])\n assert result.ret == 2\n\n @pytest.mark.issue(14)\n def test_capture_and_logging(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\\\n import logging\n def test_log(capsys):\n logging.error('x')\n \"\"\"\n )\n result = testdir.runpytest_subprocess(p)\n assert \"closed\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_disabled_capture_fixture_TestCaptureFixture.test_disabled_capture_fixture.if_no_capture_.else_.assert_test_normal_execu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_disabled_capture_fixture_TestCaptureFixture.test_disabled_capture_fixture.if_no_capture_.else_.assert_test_normal_execu", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 620, "end_line": 650, "span_ids": ["TestCaptureFixture.test_disabled_capture_fixture"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @pytest.mark.parametrize(\"fixture\", [\"capsys\", \"capfd\"])\n @pytest.mark.parametrize(\"no_capture\", [True, False])\n def test_disabled_capture_fixture(self, testdir, fixture, no_capture):\n testdir.makepyfile(\n \"\"\"\\\n def test_disabled({fixture}):\n print('captured before')\n with {fixture}.disabled():\n print('while capture is disabled')\n print('captured after')\n assert {fixture}.readouterr() == ('captured 
before\\\\ncaptured after\\\\n', '')\n\n def test_normal():\n print('test_normal executed')\n \"\"\".format(\n fixture=fixture\n )\n )\n args = (\"-s\",) if no_capture else ()\n result = testdir.runpytest_subprocess(*args)\n result.stdout.fnmatch_lines(\n \"\"\"\n *while capture is disabled*\n \"\"\"\n )\n assert \"captured before\" not in result.stdout.str()\n assert \"captured after\" not in result.stdout.str()\n if no_capture:\n assert \"test_normal executed\" in result.stdout.str()\n else:\n assert \"test_normal executed\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_TestCaptureFixture.test_fixture_use_by_other_fixtures.assert_stderr_contents_b": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_TestCaptureFixture.test_fixture_use_by_other_fixtures.assert_stderr_contents_b", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 688, "span_ids": ["TestCaptureFixture.test_fixture_use_by_other_fixtures"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @pytest.mark.parametrize(\"fixture\", [\"capsys\", \"capfd\"])\n def test_fixture_use_by_other_fixtures(self, testdir, fixture):\n \"\"\"\n Ensure that capsys and capfd can be used by other fixtures during setup and teardown.\n \"\"\"\n testdir.makepyfile(\n \"\"\"\\\n from __future__ import print_function\n import sys\n import pytest\n\n @pytest.fixture\n def captured_print({fixture}):\n print('stdout contents begin')\n print('stderr contents begin', file=sys.stderr)\n out, err = {fixture}.readouterr()\n\n yield out, err\n\n print('stdout contents end')\n print('stderr contents end', file=sys.stderr)\n out, err = {fixture}.readouterr()\n assert out == 'stdout contents end\\\\n'\n assert err == 'stderr contents end\\\\n'\n\n def test_captured_print(captured_print):\n out, err = captured_print\n assert out == 'stdout contents begin\\\\n'\n assert err == 'stderr contents begin\\\\n'\n \"\"\".format(\n fixture=fixture\n )\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n assert \"stdout contents begin\" not in result.stdout.str()\n assert \"stderr contents begin\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown_TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 716, "span_ids": ["TestCaptureFixture.test_fixture_use_by_other_fixtures_teardown"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureFixture(object):\n\n @pytest.mark.parametrize(\"cap\", [\"capsys\", \"capfd\"])\n def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):\n \"\"\"Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)\"\"\"\n testdir.makepyfile(\n \"\"\"\\\n import sys\n import pytest\n import os\n\n @pytest.fixture()\n def fix({cap}):\n print(\"setup out\")\n sys.stderr.write(\"setup err\\\\n\")\n yield\n out, err = {cap}.readouterr()\n assert out == 'setup out\\\\ncall out\\\\n'\n assert err == 'setup err\\\\ncall err\\\\n'\n\n def test_a(fix):\n print(\"call out\")\n sys.stderr.write(\"call err\\\\n\")\n \"\"\".format(\n cap=cap\n )\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_setup_failure_does_not_kill_capturing_test_setup_failure_does_not_kill_capturing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_setup_failure_does_not_kill_capturing_test_setup_failure_does_not_kill_capturing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 719, "end_line": 731, "span_ids": ["test_setup_failure_does_not_kill_capturing"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_failure_does_not_kill_capturing(testdir):\n sub1 = testdir.mkpydir(\"sub1\")\n sub1.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_runtest_setup(item):\n raise ValueError(42)\n \"\"\"\n )\n )\n sub1.join(\"test_mod.py\").write(\"def test_func1(): pass\")\n result = testdir.runpytest(testdir.tmpdir, \"--traceconfig\")\n result.stdout.fnmatch_lines([\"*ValueError(42)*\", \"*1 error*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_fdfuncarg_skips_on_no_osdup_test_capture_conftest_runtest_setup.assert_hello19_not_in_r": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_fdfuncarg_skips_on_no_osdup_test_capture_conftest_runtest_setup.assert_hello19_not_in_r", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 734, "end_line": 758, "span_ids": ["test_capture_conftest_runtest_setup", "test_fdfuncarg_skips_on_no_osdup"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fdfuncarg_skips_on_no_osdup(testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n if hasattr(os, 'dup'):\n del os.dup\n def test_hello(capfd):\n pass\n \"\"\"\n )\n result = testdir.runpytest_subprocess(\"--capture=no\")\n result.stdout.fnmatch_lines([\"*1 skipped*\"])\n\n\ndef test_capture_conftest_runtest_setup(testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_runtest_setup():\n print(\"hello19\")\n \"\"\"\n )\n testdir.makepyfile(\"def test_func(): pass\")\n result = testdir.runpytest()\n assert result.ret == 0\n assert \"hello19\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_badoutput_issue412_test_capture_early_option_parsing.assert_hello19_in_resul": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_badoutput_issue412_test_capture_early_option_parsing.assert_hello19_in_resul", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 761, "end_line": 793, "span_ids": ["test_capture_early_option_parsing", "test_capture_badoutput_issue412"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_capture_badoutput_issue412(testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n\n def test_func():\n omg = bytearray([1,129,1])\n os.write(1, omg)\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"--cap=fd\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *def test_func*\n *assert 0*\n *Captured*\n *1 failed*\n \"\"\"\n )\n\n\ndef test_capture_early_option_parsing(testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_runtest_setup():\n print(\"hello19\")\n \"\"\"\n )\n testdir.makepyfile(\"def test_func(): pass\")\n result = testdir.runpytest(\"-vs\")\n assert result.ret == 0\n assert \"hello19\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_binary_output_test_capture_binary_output.result_assert_outcomes_pa": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_binary_output_test_capture_binary_output.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 796, "end_line": 814, "span_ids": ["test_capture_binary_output"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_capture_binary_output(testdir):\n testdir.makepyfile(\n r\"\"\"\n import pytest\n\n def test_a():\n import sys\n import subprocess\n subprocess.call([sys.executable, __file__])\n\n def test_foo():\n import os;os.write(1, b'\\xc3')\n\n if __name__ == '__main__':\n test_foo()\n \"\"\"\n )\n result = testdir.runpytest(\"--assert=plain\")\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_during_readouterr_test_error_during_readouterr.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_during_readouterr_test_error_during_readouterr.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 817, "end_line": 833, "span_ids": ["test_error_during_readouterr"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_error_during_readouterr(testdir):\n \"\"\"Make sure we suspend capturing if errors occur during readouterr\"\"\"\n testdir.makepyfile(\n pytest_xyz=\"\"\"\n from _pytest.capture import FDCapture\n\n def bad_snap(self):\n raise Exception('boom')\n\n assert FDCapture.snap\n FDCapture.snap = bad_snap\n \"\"\"\n )\n result = testdir.runpytest_subprocess(\"-p\", \"pytest_xyz\", \"--version\")\n result.stderr.fnmatch_lines(\n [\"*in bad_snap\", \" raise Exception('boom')\", \"Exception: boom\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureIO_TestCaptureIO.test_write_bytes_to_buffer.assert_f_getvalue_f": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestCaptureIO_TestCaptureIO.test_write_bytes_to_buffer.assert_f_getvalue_f", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 836, "end_line": 863, "span_ids": ["TestCaptureIO.test_write_bytes_to_buffer", "TestCaptureIO.test_unicode_and_str_mixture", "TestCaptureIO.test_text", "TestCaptureIO"], "tokens": 231}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCaptureIO(object):\n def test_text(self):\n f = capture.CaptureIO()\n f.write(\"hello\")\n s = f.getvalue()\n assert s == \"hello\"\n f.close()\n\n def test_unicode_and_str_mixture(self):\n f = capture.CaptureIO()\n if sys.version_info >= (3, 0):\n f.write(\"\\u00f6\")\n pytest.raises(TypeError, f.write, b\"hello\")\n else:\n f.write(u\"\\u00f6\")\n f.write(b\"hello\")\n s = f.getvalue()\n f.close()\n assert isinstance(s, text_type)\n\n @pytest.mark.skipif(sys.version_info[0] == 2, reason=\"python 3 only behaviour\")\n def test_write_bytes_to_buffer(self):\n \"\"\"In python3, stdout / stderr are text io wrappers (exposing a buffer\n property of the underlying bytestream). See issue #1407\n \"\"\"\n f = capture.CaptureIO()\n f.buffer.write(b\"foo\\r\\n\")\n assert f.getvalue() == \"foo\\r\\n\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_test_dontreadfrominput_buffer_python3._just_for_completeness": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_test_dontreadfrominput_buffer_python3._just_for_completeness", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 866, "end_line": 891, "span_ids": ["test_dontreadfrominput", "test_dontreadfrominput_buffer_python3"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dontreadfrominput():\n from _pytest.capture import DontReadFromInput\n\n f = DontReadFromInput()\n assert not f.isatty()\n pytest.raises(IOError, f.read)\n pytest.raises(IOError, f.readlines)\n iter_f = iter(f)\n pytest.raises(IOError, next, iter_f)\n pytest.raises(UnsupportedOperation, f.fileno)\n f.close() # just for completeness\n\n\n@pytest.mark.skipif(\"sys.version_info < (3,)\", reason=\"python2 has no buffer\")\ndef test_dontreadfrominput_buffer_python3():\n from _pytest.capture import DontReadFromInput\n\n f = DontReadFromInput()\n fb = f.buffer\n assert not fb.isatty()\n pytest.raises(IOError, fb.read)\n pytest.raises(IOError, fb.readlines)\n iter_f = iter(f)\n pytest.raises(IOError, next, iter_f)\n pytest.raises(ValueError, fb.fileno)\n f.close() # just for completeness", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_buffer_python2_test_dupfile.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_buffer_python2_test_dupfile.None_4", "embedding": null, "metadata": 
{"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 894, "end_line": 936, "span_ids": ["test_dupfile", "test_dontreadfrominput_buffer_python2", "tmpfile"], "tokens": 311}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"sys.version_info >= (3,)\", reason=\"python2 has no buffer\")\ndef test_dontreadfrominput_buffer_python2():\n from _pytest.capture import DontReadFromInput\n\n f = DontReadFromInput()\n with pytest.raises(AttributeError):\n f.buffer\n f.close() # just for completeness\n\n\n@pytest.yield_fixture\ndef tmpfile(testdir):\n f = testdir.makepyfile(\"\").open(\"wb+\")\n yield f\n if not f.closed:\n f.close()\n\n\n@needsosdup\ndef test_dupfile(tmpfile):\n flist = []\n for i in range(5):\n nf = capture.safe_text_dupfile(tmpfile, \"wb\")\n assert nf != tmpfile\n assert nf.fileno() != tmpfile.fileno()\n assert nf not in flist\n print(i, end=\"\", file=nf)\n flist.append(nf)\n\n fname_open = flist[0].name\n assert fname_open == repr(flist[0].buffer)\n\n for i in range(5):\n f = flist[i]\n f.close()\n fname_closed = flist[0].name\n assert fname_closed == repr(flist[0].buffer)\n assert fname_closed != fname_open\n tmpfile.seek(0)\n s = tmpfile.read()\n assert \"01234\" in repr(s)\n tmpfile.close()\n assert fname_closed == repr(flist[0].buffer)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dupfile_on_bytesio_test_dupfile_on_textio.assert_not_hasattr_f_na": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dupfile_on_bytesio_test_dupfile_on_textio.assert_not_hasattr_f_na", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 939, "end_line": 952, "span_ids": ["test_dupfile_on_textio", "test_dupfile_on_bytesio"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dupfile_on_bytesio():\n bio = io.BytesIO()\n f = capture.safe_text_dupfile(bio, \"wb\")\n f.write(\"hello\")\n assert bio.getvalue() == b\"hello\"\n assert \"BytesIO object\" in f.name\n\n\ndef test_dupfile_on_textio():\n tio = py.io.TextIO()\n f = capture.safe_text_dupfile(tio, \"wb\")\n f.write(\"hello\")\n assert tio.getvalue() == \"hello\"\n assert not hasattr(f, \"name\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_lsof_check_lsof_check.assert_len2_len1_3_o": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_lsof_check_lsof_check.assert_len2_len1_3_o", 
"embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 955, "end_line": 967, "span_ids": ["lsof_check"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@contextlib.contextmanager\ndef lsof_check():\n pid = os.getpid()\n try:\n out = subprocess.check_output((\"lsof\", \"-p\", str(pid))).decode()\n except (OSError, subprocess.CalledProcessError, UnicodeDecodeError):\n # about UnicodeDecodeError, see note on pytester\n pytest.skip(\"could not run 'lsof'\")\n yield\n out2 = subprocess.check_output((\"lsof\", \"-p\", str(pid))).decode()\n len1 = len([x for x in out.split(\"\\n\") if \"REG\" in x])\n len2 = len([x for x in out2.split(\"\\n\") if \"REG\" in x])\n assert len2 < len1 + 3, out2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture_TestFDCapture.test_writeorg.with_open_tmpfile_name_.assert_stmp_data2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture_TestFDCapture.test_writeorg.with_open_tmpfile_name_.assert_stmp_data2", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 970, "end_line": 1030, "span_ids": ["TestFDCapture.test_stdin", "TestFDCapture.test_simple_fail_second_start", "TestFDCapture.test_writeorg", "TestFDCapture.test_simple_many", "TestFDCapture.test_simple_many_check_open_files", "TestFDCapture.test_stderr", "TestFDCapture.test_simple", "TestFDCapture"], "tokens": 425}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFDCapture(object):\n pytestmark = needsosdup\n\n def test_simple(self, tmpfile):\n fd = tmpfile.fileno()\n cap = capture.FDCapture(fd)\n data = b\"hello\"\n os.write(fd, data)\n s = cap.snap()\n cap.done()\n assert not s\n cap = capture.FDCapture(fd)\n cap.start()\n os.write(fd, data)\n s = cap.snap()\n cap.done()\n assert s == \"hello\"\n\n def test_simple_many(self, tmpfile):\n for i in range(10):\n self.test_simple(tmpfile)\n\n def test_simple_many_check_open_files(self, testdir):\n with lsof_check():\n with testdir.makepyfile(\"\").open(\"wb+\") as tmpfile:\n self.test_simple_many(tmpfile)\n\n def test_simple_fail_second_start(self, tmpfile):\n fd = tmpfile.fileno()\n cap = capture.FDCapture(fd)\n cap.done()\n pytest.raises(ValueError, cap.start)\n\n def test_stderr(self):\n cap = capture.FDCapture(2)\n cap.start()\n print(\"hello\", file=sys.stderr)\n s = cap.snap()\n cap.done()\n assert s == \"hello\\n\"\n\n def test_stdin(self, tmpfile):\n cap = capture.FDCapture(0)\n cap.start()\n x = os.read(0, 100).strip()\n cap.done()\n assert x == b\"\"\n\n def test_writeorg(self, tmpfile):\n data1, 
data2 = b\"foo\", b\"bar\"\n cap = capture.FDCapture(tmpfile.fileno())\n cap.start()\n tmpfile.write(data1)\n tmpfile.flush()\n cap.writeorg(data2)\n scap = cap.snap()\n cap.done()\n assert scap == data1.decode(\"ascii\")\n with open(tmpfile.name, \"rb\") as stmp_file:\n stmp = stmp_file.read()\n assert stmp == data2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture.test_simple_resume_suspend_saved_fd.try_.finally_.os_close_new_fd_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestFDCapture.test_simple_resume_suspend_saved_fd.try_.finally_.os_close_new_fd_", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1032, "end_line": 1062, "span_ids": ["saved_fd", "TestFDCapture.test_simple_resume_suspend"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFDCapture(object):\n\n def test_simple_resume_suspend(self, tmpfile):\n with saved_fd(1):\n cap = capture.FDCapture(1)\n cap.start()\n data = b\"hello\"\n os.write(1, data)\n sys.stdout.write(\"whatever\")\n s = cap.snap()\n assert s == \"hellowhatever\"\n cap.suspend()\n os.write(1, b\"world\")\n sys.stdout.write(\"qlwkej\")\n assert not cap.snap()\n cap.resume()\n os.write(1, b\"but now\")\n sys.stdout.write(\" yes\\n\")\n s = cap.snap()\n assert s == \"but now yes\\n\"\n cap.suspend()\n cap.done()\n pytest.raises(AttributeError, cap.suspend)\n\n\n@contextlib.contextmanager\ndef saved_fd(fd):\n new_fd = os.dup(fd)\n try:\n yield\n finally:\n os.dup2(new_fd, fd)\n os.close(new_fd)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture_TestStdCapture.test_reset_twice_error.assert_not_err": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture_TestStdCapture.test_reset_twice_error.assert_not_err", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1068, "end_line": 1137, "span_ids": ["TestStdCapture.test_capturing_reset_simple", "TestStdCapture", "TestStdCapture.test_reset_twice_error", "TestStdCapture.test_capturing_done_simple", "TestStdCapture.test_capturing_readouterr", "TestStdCapture.getcapture", "TestStdCapture.test_capture_results_accessible_by_attribute", "TestStdCapture.test_capturing_readouterr_decode_error_handling", "TestStdCapture.test_capturing_readouterr_unicode"], "tokens": 529}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestStdCapture(object):\n captureclass = staticmethod(StdCapture)\n\n @contextlib.contextmanager\n def getcapture(self, **kw):\n cap = self.__class__.captureclass(**kw)\n cap.start_capturing()\n try:\n yield cap\n finally:\n cap.stop_capturing()\n\n def test_capturing_done_simple(self):\n with self.getcapture() as cap:\n sys.stdout.write(\"hello\")\n sys.stderr.write(\"world\")\n out, err = cap.readouterr()\n assert out == \"hello\"\n assert err == \"world\"\n\n def test_capturing_reset_simple(self):\n with self.getcapture() as cap:\n print(\"hello world\")\n sys.stderr.write(\"hello error\\n\")\n out, err = cap.readouterr()\n assert out == \"hello world\\n\"\n assert err == \"hello error\\n\"\n\n def test_capturing_readouterr(self):\n with self.getcapture() as cap:\n print(\"hello world\")\n sys.stderr.write(\"hello error\\n\")\n out, err = cap.readouterr()\n assert out == \"hello world\\n\"\n assert err == \"hello error\\n\"\n sys.stderr.write(\"error2\")\n out, err = cap.readouterr()\n assert err == \"error2\"\n\n def test_capture_results_accessible_by_attribute(self):\n with self.getcapture() as cap:\n sys.stdout.write(\"hello\")\n sys.stderr.write(\"world\")\n capture_result = cap.readouterr()\n assert capture_result.out == \"hello\"\n assert capture_result.err == \"world\"\n\n def test_capturing_readouterr_unicode(self):\n with self.getcapture() as cap:\n print(\"hx\u0105\u0107\")\n out, err = cap.readouterr()\n assert out == u\"hx\u0105\u0107\\n\"\n\n @pytest.mark.skipif(\n \"sys.version_info >= (3,)\", reason=\"text output different for bytes on python3\"\n )\n def test_capturing_readouterr_decode_error_handling(self):\n with self.getcapture() as cap:\n # triggered an internal error in pytest\n print(\"\\xa6\")\n out, err = cap.readouterr()\n assert out == u\"\\ufffd\\n\"\n\n def test_reset_twice_error(self):\n with self.getcapture() as cap:\n print(\"hello\")\n out, err = cap.readouterr()\n pytest.raises(ValueError, cap.stop_capturing)\n assert out == \"hello\\n\"\n assert not err", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_modify_sysouterr_in_between_TestStdCapture.test_capturing_modify_sysouterr_in_between.assert_sys_stderr_olde": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_modify_sysouterr_in_between_TestStdCapture.test_capturing_modify_sysouterr_in_between.assert_sys_stderr_olde", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1139, "end_line": 1153, "span_ids": ["TestStdCapture.test_capturing_modify_sysouterr_in_between"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestStdCapture(object):\n\n def test_capturing_modify_sysouterr_in_between(self):\n oldout = sys.stdout\n olderr = sys.stderr\n with self.getcapture() as cap:\n sys.stdout.write(\"hello\")\n sys.stderr.write(\"world\")\n sys.stdout = 
capture.CaptureIO()\n sys.stderr = capture.CaptureIO()\n print(\"not seen\")\n sys.stderr.write(\"not seen\\n\")\n out, err = cap.readouterr()\n assert out == \"hello\"\n assert err == \"world\"\n assert sys.stdout == oldout\n assert sys.stderr == olderr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_error_recursive_TestStdCapture.test_stdin_nulled_by_default.with_self_getcapture_.pytest_raises_IOError_sy": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCapture.test_capturing_error_recursive_TestStdCapture.test_stdin_nulled_by_default.with_self_getcapture_.pytest_raises_IOError_sy", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1155, "end_line": 1193, "span_ids": ["TestStdCapture.test_just_err_capture", "TestStdCapture.test_stdin_restored", "TestStdCapture.test_capturing_error_recursive", "TestStdCapture.test_just_out_capture", "TestStdCapture.test_stdin_nulled_by_default"], "tokens": 317}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestStdCapture(object):\n\n def test_capturing_error_recursive(self):\n with self.getcapture() as cap1:\n print(\"cap1\")\n with self.getcapture() as cap2:\n print(\"cap2\")\n out2, err2 = cap2.readouterr()\n out1, err1 = cap1.readouterr()\n assert out1 == \"cap1\\n\"\n assert out2 == \"cap2\\n\"\n\n def test_just_out_capture(self):\n with self.getcapture(out=True, err=False) as cap:\n sys.stdout.write(\"hello\")\n sys.stderr.write(\"world\")\n out, err = cap.readouterr()\n assert out == \"hello\"\n assert not err\n\n def test_just_err_capture(self):\n with self.getcapture(out=False, err=True) as cap:\n sys.stdout.write(\"hello\")\n sys.stderr.write(\"world\")\n out, err = cap.readouterr()\n assert err == \"world\"\n assert not out\n\n def test_stdin_restored(self):\n old = sys.stdin\n with self.getcapture(in_=True):\n newstdin = sys.stdin\n assert newstdin != sys.stdin\n assert sys.stdin is old\n\n def test_stdin_nulled_by_default(self):\n print(\"XXX this test may well hang instead of crashing\")\n print(\"XXX which indicates an error in the underlying capturing\")\n print(\"XXX mechanisms\")\n with self.getcapture():\n pytest.raises(IOError, sys.stdin.read)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFD_TestStdCaptureFD.test_many.with_lsof_check_.for_i_in_range_10_.cap_stop_capturing_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFD_TestStdCaptureFD.test_many.with_lsof_check_.for_i_in_range_10_.cap_stop_capturing_", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1196, "end_line": 1236, 
"span_ids": ["TestStdCaptureFD.test_intermingling", "TestStdCaptureFD.test_many", "TestStdCaptureFD.test_simple_only_fd", "TestStdCaptureFD"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestStdCaptureFD(TestStdCapture):\n pytestmark = needsosdup\n captureclass = staticmethod(StdCaptureFD)\n\n def test_simple_only_fd(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n def test_x():\n os.write(1, \"hello\\\\n\".encode(\"ascii\"))\n assert 0\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines(\n \"\"\"\n *test_x*\n *assert 0*\n *Captured stdout*\n \"\"\"\n )\n\n def test_intermingling(self):\n with self.getcapture() as cap:\n os.write(1, b\"1\")\n sys.stdout.write(str(2))\n sys.stdout.flush()\n os.write(1, b\"3\")\n os.write(2, b\"a\")\n sys.stderr.write(\"b\")\n sys.stderr.flush()\n os.write(2, b\"c\")\n out, err = cap.readouterr()\n assert out == \"123\"\n assert err == \"abc\"\n\n def test_many(self, capfd):\n with lsof_check():\n for i in range(10):\n cap = StdCaptureFD()\n cap.stop_capturing()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFDinvalidFD_TestStdCaptureFDinvalidFD.test_stdcapture_fd_invalid_fd.assert_result_parseoutcom": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_TestStdCaptureFDinvalidFD_TestStdCaptureFDinvalidFD.test_stdcapture_fd_invalid_fd.assert_result_parseoutcom", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1239, "end_line": 1272, "span_ids": ["TestStdCaptureFDinvalidFD", "TestStdCaptureFDinvalidFD.test_stdcapture_fd_invalid_fd"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestStdCaptureFDinvalidFD(object):\n pytestmark = needsosdup\n\n def test_stdcapture_fd_invalid_fd(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n from _pytest import capture\n\n def StdCaptureFD(out=True, err=True, in_=True):\n return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)\n\n def test_stdout():\n os.close(1)\n cap = StdCaptureFD(out=True, err=False, in_=False)\n assert repr(cap.out) == \"\"\n cap.stop_capturing()\n\n def test_stderr():\n os.close(2)\n cap = StdCaptureFD(out=False, err=True, in_=False)\n assert repr(cap.err) == \"\"\n cap.stop_capturing()\n\n def test_stdin():\n os.close(0)\n cap = StdCaptureFD(out=False, err=False, in_=True)\n assert repr(cap.in_) == \"\"\n cap.stop_capturing()\n \"\"\"\n )\n result = testdir.runpytest_subprocess(\"--capture=fd\")\n assert result.ret == 0\n assert result.parseoutcomes()[\"passed\"] == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_not_started_but_reset_test_fdcapture_tmpfile_remains_the_same.assert_capfile2_capfil": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_not_started_but_reset_test_fdcapture_tmpfile_remains_the_same.assert_capfile2_capfil", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1275, "end_line": 1310, "span_ids": ["test_capture_not_started_but_reset", "test_fdcapture_tmpfile_remains_the_same", "test_capsys_results_accessible_by_attribute", "test_using_capsys_fixture_works_with_sys_stdout_encoding"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_capture_not_started_but_reset():\n capsys = StdCapture()\n capsys.stop_capturing()\n\n\ndef test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):\n test_text = \"test text\"\n\n print(test_text.encode(sys.stdout.encoding, \"replace\"))\n (out, err) = capsys.readouterr()\n assert out\n assert err == \"\"\n\n\ndef test_capsys_results_accessible_by_attribute(capsys):\n sys.stdout.write(\"spam\")\n sys.stderr.write(\"eggs\")\n capture_result = capsys.readouterr()\n assert capture_result.out == \"spam\"\n assert capture_result.err == \"eggs\"\n\n\n@needsosdup\n@pytest.mark.parametrize(\"use\", [True, False])\ndef test_fdcapture_tmpfile_remains_the_same(tmpfile, use):\n if not use:\n tmpfile = True\n cap = StdCaptureFD(out=False, err=tmpfile)\n try:\n cap.start_capturing()\n capfile = cap.err.tmpfile\n cap.readouterr()\n finally:\n cap.stop_capturing()\n capfile2 = cap.err.tmpfile\n assert capfile2 == capfile", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_close_and_capture_again_test_close_and_capture_again.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_close_and_capture_again_test_close_and_capture_again.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1313, "end_line": 1333, "span_ids": ["test_close_and_capture_again"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@needsosdup\ndef test_close_and_capture_again(testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n def test_close():\n os.close(1)\n def test_capture_again():\n os.write(1, b\"hello\\\\n\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines(\n \"\"\"\n *test_capture_again*\n *assert 0*\n *stdout*\n 
*hello*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_and_logging_fundamentals_test_capturing_and_logging_fundamentals.assert_atexit_not_in_re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capturing_and_logging_fundamentals_test_capturing_and_logging_fundamentals.assert_atexit_not_in_re", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1336, "end_line": 1375, "span_ids": ["test_capturing_and_logging_fundamentals"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"SysCapture\", \"FDCapture\"])\ndef test_capturing_and_logging_fundamentals(testdir, method):\n if method == \"StdCaptureFD\" and not hasattr(os, \"dup\"):\n pytest.skip(\"need os.dup\")\n # here we check a fundamental feature\n p = testdir.makepyfile(\n \"\"\"\n import sys, os\n import py, logging\n from _pytest import capture\n cap = capture.MultiCapture(out=False, in_=False,\n Capture=capture.%s)\n cap.start_capturing()\n\n logging.warning(\"hello1\")\n outerr = cap.readouterr()\n print(\"suspend, captured %%s\" %%(outerr,))\n logging.warning(\"hello2\")\n\n cap.pop_outerr_to_orig()\n logging.warning(\"hello3\")\n\n outerr = cap.readouterr()\n print(\"suspend2, captured %%s\" %% (outerr,))\n \"\"\"\n % (method,)\n )\n result = testdir.runpython(p)\n result.stdout.fnmatch_lines(\n \"\"\"\n suspend, captured*hello1*\n suspend2, captured*WARNING:root:hello3*\n \"\"\"\n )\n result.stderr.fnmatch_lines(\n \"\"\"\n WARNING:root:hello2\n \"\"\"\n )\n assert \"atexit\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_attribute_issue555_test_py36_windowsconsoleio_workaround_non_standard_streams._py36_windowsconsoleio_wo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_error_attribute_issue555_test_py36_windowsconsoleio_workaround_non_standard_streams._py36_windowsconsoleio_wo", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1378, "end_line": 1407, "span_ids": ["test_error_attribute_issue555", "test_py36_windowsconsoleio_workaround_non_standard_streams.DummyStream", "test_py36_windowsconsoleio_workaround_non_standard_streams.DummyStream.write", "test_py36_windowsconsoleio_workaround_non_standard_streams"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "def test_error_attribute_issue555(testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_capattr():\n assert sys.stdout.errors == \"strict\"\n assert sys.stderr.errors == \"strict\"\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n\n@pytest.mark.skipif(\n not sys.platform.startswith(\"win\") and sys.version_info[:2] >= (3, 6),\n reason=\"only py3.6+ on windows\",\n)\ndef test_py36_windowsconsoleio_workaround_non_standard_streams():\n \"\"\"\n Ensure _py36_windowsconsoleio_workaround function works with objects that\n do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).\n \"\"\"\n from _pytest.capture import _py36_windowsconsoleio_workaround\n\n class DummyStream(object):\n def write(self, s):\n pass\n\n stream = DummyStream()\n _py36_windowsconsoleio_workaround(stream)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_has_encoding_test_pickling_and_unpickling_encoded_file.pickle_loads_ef_as_str_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_dontreadfrominput_has_encoding_test_pickling_and_unpickling_encoded_file.pickle_loads_ef_as_str_", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1410, "end_line": 1464, "span_ids": ["test_pickling_and_unpickling_encoded_file", "test_crash_on_closing_tmpfile_py27", "test_dontreadfrominput_has_encoding"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_dontreadfrominput_has_encoding(testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_capattr():\n # should not raise AttributeError\n assert sys.stdout.encoding\n assert sys.stderr.encoding\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n\ndef test_crash_on_closing_tmpfile_py27(testdir):\n p = testdir.makepyfile(\n \"\"\"\n from __future__ import print_function\n import threading\n import sys\n\n printing = threading.Event()\n\n def spam():\n f = sys.stderr\n print('SPAMBEFORE', end='', file=f)\n printing.set()\n\n while True:\n try:\n f.flush()\n except (OSError, ValueError):\n break\n\n def test_spam_in_thread():\n t = threading.Thread(target=spam)\n t.daemon = True\n t.start()\n\n printing.wait()\n \"\"\"\n )\n result = testdir.runpytest_subprocess(str(p))\n assert result.ret == 0\n assert result.stderr.str() == \"\"\n assert \"IOError\" not in result.stdout.str()\n\n\ndef test_pickling_and_unpickling_encoded_file():\n # See https://bitbucket.org/pytest-dev/pytest/pull-request/194\n # pickle.loads() raises infinite recursion if\n # EncodedFile.__getattr__ is not implemented properly\n ef = capture.EncodedFile(None, None)\n ef_as_str = pickle.dumps(ef)\n pickle.loads(ef_as_str)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_global_capture_with_live_logging_test_global_capture_with_live_logging.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_global_capture_with_live_logging_test_global_capture_with_live_logging.None_7", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1467, "end_line": 1522, "span_ids": ["test_global_capture_with_live_logging"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_global_capture_with_live_logging(testdir):\n # Issue 3819\n # capture should work with live cli logging\n\n # Teardown report seems to have the capture for the whole process (setup, capture, teardown)\n testdir.makeconftest(\n \"\"\"\n def pytest_runtest_logreport(report):\n if \"test_global\" in report.nodeid:\n if report.when == \"teardown\":\n with open(\"caplog\", \"w\") as f:\n f.write(report.caplog)\n with open(\"capstdout\", \"w\") as f:\n f.write(report.capstdout)\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n import logging\n import sys\n import pytest\n\n logger = logging.getLogger(__name__)\n\n @pytest.fixture\n def fix1():\n print(\"fix setup\")\n logging.info(\"fix setup\")\n yield\n logging.info(\"fix teardown\")\n print(\"fix teardown\")\n\n def test_global(fix1):\n print(\"begin test\")\n logging.info(\"something in test\")\n print(\"end test\")\n \"\"\"\n )\n result = testdir.runpytest_subprocess(\"--log-cli-level=INFO\")\n assert result.ret == 0\n\n with open(\"caplog\", \"r\") as f:\n caplog = f.read()\n\n assert \"fix setup\" in caplog\n assert \"something in test\" in caplog\n assert \"fix teardown\" in caplog\n\n with open(\"capstdout\", \"r\") as f:\n capstdout = f.read()\n\n assert \"fix setup\" in capstdout\n assert \"begin test\" in capstdout\n assert \"end test\" in capstdout\n assert \"fix teardown\" in capstdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_with_live_logging_test_capture_with_live_logging.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_capture_with_live_logging_test_capture_with_live_logging.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1525, "end_line": 1556, "span_ids": ["test_capture_with_live_logging"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"capture_fixture\", [\"capsys\", \"capfd\"])\ndef test_capture_with_live_logging(testdir, capture_fixture):\n # Issue 3819\n # capture should work 
with live cli logging\n\n testdir.makepyfile(\n \"\"\"\n import logging\n import sys\n\n logger = logging.getLogger(__name__)\n\n def test_capture({0}):\n print(\"hello\")\n sys.stderr.write(\"world\\\\n\")\n captured = {0}.readouterr()\n assert captured.out == \"hello\\\\n\"\n assert captured.err == \"world\\\\n\"\n\n logging.info(\"something\")\n print(\"next\")\n logging.info(\"something\")\n\n captured = {0}.readouterr()\n assert captured.out == \"next\\\\n\"\n \"\"\".format(\n capture_fixture\n )\n )\n\n result = testdir.runpytest_subprocess(\"--log-cli-level=INFO\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_typeerror_encodedfile_write_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_capture.py_test_typeerror_encodedfile_write_", "embedding": null, "metadata": {"file_path": "testing/test_capture.py", "file_name": "test_capture.py", "file_type": "text/x-python", "category": "test", "start_line": 1559, "end_line": 1580, "span_ids": ["test_typeerror_encodedfile_write"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_typeerror_encodedfile_write(testdir):\n \"\"\"It should behave the same with and without output capturing (#4861).\"\"\"\n p = testdir.makepyfile(\n \"\"\"\n def test_fails():\n import sys\n sys.stdout.write(b\"foo\")\n \"\"\"\n )\n result_without_capture = testdir.runpytest(\"-s\", str(p))\n\n result_with_capture = testdir.runpytest(str(p))\n\n assert result_with_capture.ret == result_without_capture.ret\n\n if _PY3:\n result_with_capture.stdout.fnmatch_lines(\n [\"E TypeError: write() argument must be str, not bytes\"]\n )\n else:\n assert result_with_capture.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_from___future___import_ab_TestCollector.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_from___future___import_ab_TestCollector.test_check_equality.for_fn_in_fn1_fn2_fn3_.assert_modcol_fn", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 54, "span_ids": ["TestCollector.test_collect_versus_item", "imports", "TestCollector", "TestCollector.test_check_equality"], "tokens": 377}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pprint\nimport sys\nimport 
textwrap\n\nimport py\n\nimport pytest\nfrom _pytest.main import _in_venv\nfrom _pytest.main import EXIT_INTERRUPTED\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import Session\n\n\nclass TestCollector(object):\n def test_collect_versus_item(self):\n from pytest import Collector, Item\n\n assert not issubclass(Collector, Item)\n assert not issubclass(Item, Collector)\n\n def test_check_equality(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n def test_pass(): pass\n def test_fail(): assert 0\n \"\"\"\n )\n fn1 = testdir.collect_by_name(modcol, \"test_pass\")\n assert isinstance(fn1, pytest.Function)\n fn2 = testdir.collect_by_name(modcol, \"test_pass\")\n assert isinstance(fn2, pytest.Function)\n\n assert fn1 == fn2\n assert fn1 != modcol\n if sys.version_info < (3, 0):\n assert cmp(fn1, fn2) == 0 # NOQA\n assert hash(fn1) == hash(fn2)\n\n fn3 = testdir.collect_by_name(modcol, \"test_fail\")\n assert isinstance(fn3, pytest.Function)\n assert not (fn1 == fn3)\n assert fn1 != fn3\n\n for fn in fn1, fn2, fn3:\n assert fn != 3\n assert fn != modcol\n assert fn != [1, 2, 3]\n assert [1, 2, 3] != fn\n assert modcol != fn", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getparent_TestCollector.test_getparent.assert_parent_is_cls": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getparent_TestCollector.test_getparent.assert_parent_is_cls", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 74, "span_ids": ["TestCollector.test_getparent"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollector(object):\n\n def test_getparent(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n class TestClass(object):\n def test_foo():\n pass\n \"\"\"\n )\n cls = testdir.collect_by_name(modcol, \"TestClass\")\n fn = testdir.collect_by_name(testdir.collect_by_name(cls, \"()\"), \"test_foo\")\n\n parent = fn.getparent(pytest.Module)\n assert parent is modcol\n\n parent = fn.getparent(pytest.Function)\n assert parent is fn\n\n parent = fn.getparent(pytest.Class)\n assert parent is cls", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getcustomfile_roundtrip_TestCollector.test_getcustomfile_roundtrip.assert_isinstance_nodes_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_getcustomfile_roundtrip_TestCollector.test_getcustomfile_roundtrip.assert_isinstance_nodes_0", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 76, "end_line": 93, "span_ids": 
["TestCollector.test_getcustomfile_roundtrip"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollector(object):\n\n def test_getcustomfile_roundtrip(self, testdir):\n hello = testdir.makefile(\".xxx\", hello=\"world\")\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n class CustomFile(pytest.File):\n pass\n def pytest_collect_file(path, parent):\n if path.ext == \".xxx\":\n return CustomFile(path, parent=parent)\n \"\"\"\n )\n node = testdir.getpathnode(hello)\n assert isinstance(node, pytest.File)\n assert node.name == \"hello.xxx\"\n nodes = node.session.perform_collect([node.nodeid], genitems=False)\n assert len(nodes) == 1\n assert isinstance(nodes[0], pytest.File)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_can_skip_class_with_test_attr_TestCollector.test_can_skip_class_with_test_attr.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollector.test_can_skip_class_with_test_attr_TestCollector.test_can_skip_class_with_test_attr.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 95, "end_line": 108, "span_ids": ["TestCollector.test_can_skip_class_with_test_attr"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollector(object):\n\n def test_can_skip_class_with_test_attr(self, testdir):\n \"\"\"Assure test class is skipped when using `__test__=False` (See #2007).\"\"\"\n testdir.makepyfile(\n \"\"\"\n class TestFoo(object):\n __test__ = False\n def __init__(self):\n pass\n def test_foo():\n assert True\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"collected 0 items\", \"*no tests ran in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS_TestCollectFS.test_ignored_certain_directories.assert_test_found_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS_TestCollectFS.test_ignored_certain_directories.assert_test_found_in_s", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 128, "span_ids": ["TestCollectFS.test_ignored_certain_directories", "TestCollectFS"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n def test_ignored_certain_directories(self, testdir):\n tmpdir = testdir.tmpdir\n tmpdir.ensure(\"build\", \"test_notfound.py\")\n tmpdir.ensure(\"dist\", \"test_notfound.py\")\n tmpdir.ensure(\"_darcs\", \"test_notfound.py\")\n tmpdir.ensure(\"CVS\", \"test_notfound.py\")\n tmpdir.ensure(\"{arch}\", \"test_notfound.py\")\n tmpdir.ensure(\".whatever\", \"test_notfound.py\")\n tmpdir.ensure(\".bzr\", \"test_notfound.py\")\n tmpdir.ensure(\"normal\", \"test_found.py\")\n for x in tmpdir.visit(\"test_*.py\"):\n x.write(\"def test_hello(): pass\")\n\n result = testdir.runpytest(\"--collect-only\")\n s = result.stdout.str()\n assert \"test_notfound\" not in s\n assert \"test_found\" in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_TestCollectFS.test_ignored_virtualenvs.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_TestCollectFS.test_ignored_virtualenvs.None_2", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 155, "span_ids": ["TestCollectFS.test_ignored_virtualenvs"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n\n @pytest.mark.parametrize(\n \"fname\",\n (\n \"activate\",\n \"activate.csh\",\n \"activate.fish\",\n \"Activate\",\n \"Activate.bat\",\n \"Activate.ps1\",\n ),\n )\n def test_ignored_virtualenvs(self, testdir, fname):\n bindir = \"Scripts\" if sys.platform.startswith(\"win\") else \"bin\"\n testdir.tmpdir.ensure(\"virtual\", bindir, fname)\n testfile = testdir.tmpdir.ensure(\"virtual\", \"test_invenv.py\")\n testfile.write(\"def test_hello(): pass\")\n\n # by default, ignore tests inside a virtualenv\n result = testdir.runpytest()\n assert \"test_invenv\" not in result.stdout.str()\n # allow test collection if user insists\n result = testdir.runpytest(\"--collect-in-virtualenv\")\n assert \"test_invenv\" in result.stdout.str()\n # allow test collection if user directly passes in the directory\n result = testdir.runpytest(\"virtual\")\n assert \"test_invenv\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence.assert_test_invenv_in_r": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence_TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence.assert_test_invenv_in_r", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 178, "span_ids": ["TestCollectFS.test_ignored_virtualenvs_norecursedirs_precedence"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n\n @pytest.mark.parametrize(\n \"fname\",\n (\n \"activate\",\n \"activate.csh\",\n \"activate.fish\",\n \"Activate\",\n \"Activate.bat\",\n \"Activate.ps1\",\n ),\n )\n def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname):\n bindir = \"Scripts\" if sys.platform.startswith(\"win\") else \"bin\"\n # norecursedirs takes priority\n testdir.tmpdir.ensure(\".virtual\", bindir, fname)\n testfile = testdir.tmpdir.ensure(\".virtual\", \"test_invenv.py\")\n testfile.write(\"def test_hello(): pass\")\n result = testdir.runpytest(\"--collect-in-virtualenv\")\n assert \"test_invenv\" not in result.stdout.str()\n # ...unless the virtualenv is explicitly given on the CLI\n result = testdir.runpytest(\"--collect-in-virtualenv\", \".virtual\")\n assert \"test_invenv\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test__in_venv_TestCollectFS.test__in_venv.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test__in_venv_TestCollectFS.test__in_venv.None_1", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 199, "span_ids": ["TestCollectFS.test__in_venv"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n\n @pytest.mark.parametrize(\n \"fname\",\n (\n \"activate\",\n \"activate.csh\",\n \"activate.fish\",\n \"Activate\",\n \"Activate.bat\",\n \"Activate.ps1\",\n ),\n )\n def test__in_venv(self, testdir, fname):\n \"\"\"Directly test the virtual env detection function\"\"\"\n bindir = \"Scripts\" if sys.platform.startswith(\"win\") else \"bin\"\n # no bin/activate, not a virtualenv\n base_path = testdir.tmpdir.mkdir(\"venv\")\n assert _in_venv(base_path) is False\n # with bin/activate, totally a virtualenv\n base_path.ensure(bindir, fname)\n assert _in_venv(base_path) is True", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_custom_norecursedirs_TestCollectFS.test_custom_norecursedirs.rec_assertoutcome_failed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_custom_norecursedirs_TestCollectFS.test_custom_norecursedirs.rec_assertoutcome_failed_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 201, "end_line": 215, "span_ids": ["TestCollectFS.test_custom_norecursedirs"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n\n def test_custom_norecursedirs(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n norecursedirs = mydir xyz*\n \"\"\"\n )\n tmpdir = testdir.tmpdir\n tmpdir.ensure(\"mydir\", \"test_hello.py\").write(\"def test_1(): pass\")\n tmpdir.ensure(\"xyz123\", \"test_2.py\").write(\"def test_2(): 0/0\")\n tmpdir.ensure(\"xy\", \"test_ok.py\").write(\"def test_3(): pass\")\n rec = testdir.inline_run()\n rec.assertoutcome(passed=1)\n rec = testdir.inline_run(\"xyz123/test_2.py\")\n rec.assertoutcome(failed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_testpaths_ini_TestCollectFS.test_testpaths_ini.None_1.assert_x_name_for_x_in_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectFS.test_testpaths_ini_TestCollectFS.test_testpaths_ini.None_1.assert_x_name_for_x_in_i", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 217, "end_line": 245, "span_ids": ["TestCollectFS.test_testpaths_ini"], "tokens": 316}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectFS(object):\n\n def test_testpaths_ini(self, testdir, monkeypatch):\n testdir.makeini(\n \"\"\"\n [pytest]\n testpaths = gui uts\n \"\"\"\n )\n tmpdir = testdir.tmpdir\n tmpdir.ensure(\"env\", \"test_1.py\").write(\"def test_env(): pass\")\n tmpdir.ensure(\"gui\", \"test_2.py\").write(\"def test_gui(): pass\")\n tmpdir.ensure(\"uts\", \"test_3.py\").write(\"def test_uts(): pass\")\n\n # executing from rootdir only tests from `testpaths` directories\n # are collected\n items, reprec = testdir.inline_genitems(\"-v\")\n assert [x.name for x in items] == [\"test_gui\", \"test_uts\"]\n\n # check that explicitly passing directories in the command-line\n # collects the tests\n for dirname in (\"env\", \"gui\", \"uts\"):\n items, reprec = testdir.inline_genitems(tmpdir.join(dirname))\n assert [x.name for x in items] == [\"test_%s\" % dirname]\n\n # changing cwd to each subdirectory 
and running pytest without\n # arguments collects the tests in that directory normally\n for dirname in (\"env\", \"gui\", \"uts\"):\n monkeypatch.chdir(testdir.tmpdir.join(dirname))\n items, reprec = testdir.inline_genitems()\n assert [x.name for x in items] == [\"test_%s\" % dirname]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectPluginHookRelay_TestCollectPluginHookRelay.test_pytest_collect_directory.assert_world_in_wascall": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCollectPluginHookRelay_TestCollectPluginHookRelay.test_pytest_collect_directory.assert_world_in_wascall", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 248, "end_line": 274, "span_ids": ["TestCollectPluginHookRelay.test_pytest_collect_directory", "TestCollectPluginHookRelay.test_pytest_collect_file.Plugin", "TestCollectPluginHookRelay.test_pytest_collect_file.Plugin.pytest_collect_file", "TestCollectPluginHookRelay.test_pytest_collect_directory.Plugin.pytest_collect_directory", "TestCollectPluginHookRelay.test_pytest_collect_directory.Plugin", "TestCollectPluginHookRelay", "TestCollectPluginHookRelay.test_pytest_collect_file"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectPluginHookRelay(object):\n def test_pytest_collect_file(self, testdir):\n wascalled = []\n\n class Plugin(object):\n def pytest_collect_file(self, path, parent):\n if not path.basename.startswith(\".\"):\n # Ignore hidden files, e.g. 
.testmondata.\n wascalled.append(path)\n\n testdir.makefile(\".abc\", \"xyz\")\n pytest.main([testdir.tmpdir], plugins=[Plugin()])\n assert len(wascalled) == 1\n assert wascalled[0].ext == \".abc\"\n\n def test_pytest_collect_directory(self, testdir):\n wascalled = []\n\n class Plugin(object):\n def pytest_collect_directory(self, path, parent):\n wascalled.append(path.basename)\n\n testdir.mkdir(\"hello\")\n testdir.mkdir(\"world\")\n pytest.main(testdir.tmpdir, plugins=[Plugin()])\n assert \"hello\" in wascalled\n assert \"world\" in wascalled", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback_TestPrunetraceback.test_custom_repr_failure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback_TestPrunetraceback.test_custom_repr_failure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 277, "end_line": 302, "span_ids": ["TestPrunetraceback", "TestPrunetraceback.test_custom_repr_failure"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPrunetraceback(object):\n def test_custom_repr_failure(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import not_exists\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_collect_file(path, parent):\n return MyFile(path, parent)\n class MyError(Exception):\n pass\n class MyFile(pytest.File):\n def collect(self):\n raise MyError()\n def repr_failure(self, excinfo):\n if excinfo.errisinstance(MyError):\n return \"hello world\"\n return pytest.File.repr_failure(self, excinfo)\n \"\"\"\n )\n\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*ERROR collecting*\", \"*hello world*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback.test_collect_report_postprocessing_TestPrunetraceback.test_collect_report_postprocessing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestPrunetraceback.test_collect_report_postprocessing_TestPrunetraceback.test_collect_report_postprocessing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 304, "end_line": 323, "span_ids": ["TestPrunetraceback.test_collect_report_postprocessing"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "class TestPrunetraceback(object):\n\n @pytest.mark.xfail(reason=\"other mechanism for adding to reporting needed\")\n def test_collect_report_postprocessing(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import not_exists\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.hookimpl(hookwrapper=True)\n def pytest_make_collect_report():\n outcome = yield\n rep = outcome.get_result()\n rep.headerlines += [\"header1\"]\n outcome.force_result(rep)\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*ERROR collecting*\", \"*header1*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests_TestCustomConftests.test_ignore_collect_path.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests_TestCustomConftests.test_ignore_collect_path.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 326, "end_line": 342, "span_ids": ["TestCustomConftests", "TestCustomConftests.test_ignore_collect_path"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n def test_ignore_collect_path(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_ignore_collect(path, config):\n return path.basename.startswith(\"x\") or \\\n path.basename == \"test_one.py\"\n \"\"\"\n )\n sub = testdir.mkdir(\"xy123\")\n sub.ensure(\"test_hello.py\").write(\"syntax error\")\n sub.join(\"conftest.py\").write(\"syntax error\")\n testdir.makepyfile(\"def test_hello(): pass\")\n testdir.makepyfile(test_one=\"syntax error\")\n result = testdir.runpytest(\"--fulltrace\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_ignore_collect_not_called_on_argument_TestCustomConftests.test_ignore_collect_not_called_on_argument.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_ignore_collect_not_called_on_argument_TestCustomConftests.test_ignore_collect_not_called_on_argument.None_2", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 357, "span_ids": ["TestCustomConftests.test_ignore_collect_not_called_on_argument"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n\n def test_ignore_collect_not_called_on_argument(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_ignore_collect(path, config):\n return True\n \"\"\"\n )\n p = testdir.makepyfile(\"def test_hello(): pass\")\n result = testdir.runpytest(p)\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\"*collected 0 items*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignore_exclude_on_option_TestCustomConftests.test_collectignore_exclude_on_option.assert_passed_in_result": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignore_exclude_on_option_TestCustomConftests.test_collectignore_exclude_on_option.assert_passed_in_result", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 359, "end_line": 377, "span_ids": ["TestCustomConftests.test_collectignore_exclude_on_option"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n\n def test_collectignore_exclude_on_option(self, testdir):\n testdir.makeconftest(\n \"\"\"\n collect_ignore = ['hello', 'test_world.py']\n def pytest_addoption(parser):\n parser.addoption(\"--XX\", action=\"store_true\", default=False)\n def pytest_configure(config):\n if config.getvalue(\"XX\"):\n collect_ignore[:] = []\n \"\"\"\n )\n testdir.mkdir(\"hello\")\n testdir.makepyfile(test_world=\"def test_hello(): pass\")\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n assert \"passed\" not in result.stdout.str()\n result = testdir.runpytest(\"--XX\")\n assert result.ret == 0\n assert \"passed\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignoreglob_exclude_on_option_TestCustomConftests.test_collectignoreglob_exclude_on_option.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_collectignoreglob_exclude_on_option_TestCustomConftests.test_collectignoreglob_exclude_on_option.None_4", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 379, "end_line": 397, "span_ids": ["TestCustomConftests.test_collectignoreglob_exclude_on_option"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n\n def test_collectignoreglob_exclude_on_option(self, testdir):\n testdir.makeconftest(\n \"\"\"\n collect_ignore_glob = ['*w*l[dt]*']\n def pytest_addoption(parser):\n parser.addoption(\"--XX\", action=\"store_true\", default=False)\n def pytest_configure(config):\n if config.getvalue(\"XX\"):\n collect_ignore_glob[:] = []\n \"\"\"\n )\n testdir.makepyfile(test_world=\"def test_hello(): pass\")\n testdir.makepyfile(test_welt=\"def test_hallo(): pass\")\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines([\"*collected 0 items*\"])\n result = testdir.runpytest(\"--XX\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen_TestCustomConftests.test_pytest_fs_collect_hooks_are_seen.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 399, "end_line": 413, "span_ids": ["TestCustomConftests.test_pytest_fs_collect_hooks_are_seen"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n\n def test_pytest_fs_collect_hooks_are_seen(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n class MyModule(pytest.Module):\n pass\n def pytest_collect_file(path, parent):\n if path.ext == \".py\":\n return MyModule(path, parent)\n \"\"\"\n )\n testdir.mkdir(\"sub\")\n testdir.makepyfile(\"def test_x(): pass\")\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*MyModule*\", \"*test_x*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_collect_file_from_sister_dir_TestCustomConftests.test_pytest_collect_file_from_sister_dir.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestCustomConftests.test_pytest_collect_file_from_sister_dir_TestCustomConftests.test_pytest_collect_file_from_sister_dir.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 415, "end_line": 444, "span_ids": ["TestCustomConftests.test_pytest_collect_file_from_sister_dir"], "tokens": 256}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCustomConftests(object):\n\n def test_pytest_collect_file_from_sister_dir(self, testdir):\n sub1 = testdir.mkpydir(\"sub1\")\n sub2 = testdir.mkpydir(\"sub2\")\n conf1 = testdir.makeconftest(\n \"\"\"\n import pytest\n class MyModule1(pytest.Module):\n pass\n def pytest_collect_file(path, parent):\n if path.ext == \".py\":\n return MyModule1(path, parent)\n \"\"\"\n )\n conf1.move(sub1.join(conf1.basename))\n conf2 = testdir.makeconftest(\n \"\"\"\n import pytest\n class MyModule2(pytest.Module):\n pass\n def pytest_collect_file(path, parent):\n if path.ext == \".py\":\n return MyModule2(path, parent)\n \"\"\"\n )\n conf2.move(sub2.join(conf2.basename))\n p = testdir.makepyfile(\"def test_x(): pass\")\n p.copy(sub1.join(p.basename))\n p.copy(sub2.join(p.basename))\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*MyModule1*\", \"*MyModule2*\", \"*test_x*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession_TestSession.test_parsearg.assert_len_parts_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession_TestSession.test_parsearg.assert_len_parts_2", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 447, "end_line": 465, "span_ids": ["TestSession.test_parsearg", "TestSession"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n def test_parsearg(self, testdir):\n p = testdir.makepyfile(\"def test_func(): pass\")\n subdir = testdir.mkdir(\"sub\")\n subdir.ensure(\"__init__.py\")\n target = subdir.join(p.basename)\n p.move(target)\n subdir.chdir()\n config = testdir.parseconfig(p.basename)\n rcol = Session(config=config)\n assert rcol.fspath == subdir\n parts = rcol._parsearg(p.basename)\n\n assert parts[0] == target\n assert len(parts) == 1\n parts = rcol._parsearg(p.basename + \"::test_func\")\n assert parts[0] == target\n assert parts[1] == \"test_func\"\n assert len(parts) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_topdir_TestSession.get_reported_items.return._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_topdir_TestSession.get_reported_items.return._", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", 
"start_line": 467, "end_line": 490, "span_ids": ["TestSession.test_collect_topdir", "TestSession.get_reported_items"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_topdir(self, testdir):\n p = testdir.makepyfile(\"def test_func(): pass\")\n id = \"::\".join([p.basename, \"test_func\"])\n # XXX migrate to collectonly? (see below)\n config = testdir.parseconfig(id)\n topdir = testdir.tmpdir\n rcol = Session(config)\n assert topdir == rcol.fspath\n # rootid = rcol.nodeid\n # root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0]\n # assert root2 == rcol, rootid\n colitems = rcol.perform_collect([rcol.nodeid], genitems=False)\n assert len(colitems) == 1\n assert colitems[0].fspath == p\n\n def get_reported_items(self, hookrec):\n \"\"\"Return pytest.Item instances reported by the pytest_collectreport hook\"\"\"\n calls = hookrec.getcalls(\"pytest_collectreport\")\n return [\n x\n for call in calls\n for x in call.report.result\n if isinstance(x, pytest.Item)\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_single_function_TestSession.test_collect_protocol_single_function.assert_x_name_for_x_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_single_function_TestSession.test_collect_protocol_single_function.assert_x_name_for_x_in_s", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 492, "end_line": 513, "span_ids": ["TestSession.test_collect_protocol_single_function"], "tokens": 252}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_protocol_single_function(self, testdir):\n p = testdir.makepyfile(\"def test_func(): pass\")\n id = \"::\".join([p.basename, \"test_func\"])\n items, hookrec = testdir.inline_genitems(id)\n item, = items\n assert item.name == \"test_func\"\n newid = item.nodeid\n assert newid == id\n pprint.pprint(hookrec.calls)\n topdir = testdir.tmpdir # noqa\n hookrec.assert_contains(\n [\n (\"pytest_collectstart\", \"collector.fspath == topdir\"),\n (\"pytest_make_collect_report\", \"collector.fspath == topdir\"),\n (\"pytest_collectstart\", \"collector.fspath == p\"),\n (\"pytest_make_collect_report\", \"collector.fspath == p\"),\n (\"pytest_pycollect_makeitem\", \"name == 'test_func'\"),\n (\"pytest_collectreport\", \"report.result[0].name == 'test_func'\"),\n ]\n )\n # ensure we are reporting the collection of the single test item (#2464)\n assert [x.name for x in self.get_reported_items(hookrec)] == [\"test_func\"]", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_method_TestSession.test_collect_protocol_method.for_id_in_p_basename_p_.assert_x_name_for_x_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_protocol_method_TestSession.test_collect_protocol_method.for_id_in_p_basename_p_.assert_x_name_for_x_in_s", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 515, "end_line": 531, "span_ids": ["TestSession.test_collect_protocol_method"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_protocol_method(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n normid = p.basename + \"::TestClass::test_method\"\n for id in [p.basename, p.basename + \"::TestClass\", normid]:\n items, hookrec = testdir.inline_genitems(id)\n assert len(items) == 1\n assert items[0].name == \"test_method\"\n newid = items[0].nodeid\n assert newid == normid\n # ensure we are reporting the collection of the single test item (#2464)\n assert [x.name for x in self.get_reported_items(hookrec)] == [\"test_method\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_custom_nodes_multi_id_TestSession.test_collect_custom_nodes_multi_id.assert_len_self_get_repor": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_custom_nodes_multi_id_TestSession.test_collect_custom_nodes_multi_id.assert_len_self_get_repor", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 567, "span_ids": ["TestSession.test_collect_custom_nodes_multi_id"], "tokens": 281}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_custom_nodes_multi_id(self, testdir):\n p = testdir.makepyfile(\"def test_func(): pass\")\n testdir.makeconftest(\n \"\"\"\n import pytest\n class SpecialItem(pytest.Item):\n def runtest(self):\n return # ok\n class SpecialFile(pytest.File):\n def collect(self):\n return [SpecialItem(name=\"check\", parent=self)]\n def pytest_collect_file(path, parent):\n if path.basename == %r:\n return SpecialFile(fspath=path, parent=parent)\n \"\"\"\n % p.basename\n )\n id = p.basename\n\n items, hookrec = testdir.inline_genitems(id)\n 
pprint.pprint(hookrec.calls)\n assert len(items) == 2\n hookrec.assert_contains(\n [\n (\"pytest_collectstart\", \"collector.fspath == collector.session.fspath\"),\n (\n \"pytest_collectstart\",\n \"collector.__class__.__name__ == 'SpecialFile'\",\n ),\n (\"pytest_collectstart\", \"collector.__class__.__name__ == 'Module'\"),\n (\"pytest_pycollect_makeitem\", \"name == 'test_func'\"),\n (\"pytest_collectreport\", \"report.nodeid.startswith(p.basename)\"),\n ]\n )\n assert len(self.get_reported_items(hookrec)) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_subdir_event_ordering_TestSession.test_collect_subdir_event_ordering.hookrec_assert_contains_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_subdir_event_ordering_TestSession.test_collect_subdir_event_ordering.hookrec_assert_contains_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 569, "end_line": 584, "span_ids": ["TestSession.test_collect_subdir_event_ordering"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_subdir_event_ordering(self, testdir):\n p = testdir.makepyfile(\"def test_func(): pass\")\n aaa = testdir.mkpydir(\"aaa\")\n test_aaa = aaa.join(\"test_aaa.py\")\n p.move(test_aaa)\n\n items, hookrec = testdir.inline_genitems()\n assert len(items) == 1\n pprint.pprint(hookrec.calls)\n hookrec.assert_contains(\n [\n (\"pytest_collectstart\", \"collector.fspath == test_aaa\"),\n (\"pytest_pycollect_makeitem\", \"name == 'test_func'\"),\n (\"pytest_collectreport\", \"report.nodeid.startswith('aaa/test_aaa.py')\"),\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_two_commandline_args_TestSession.test_collect_two_commandline_args.hookrec_assert_contains_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_collect_two_commandline_args_TestSession.test_collect_two_commandline_args.hookrec_assert_contains_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 586, "end_line": 609, "span_ids": ["TestSession.test_collect_two_commandline_args"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_collect_two_commandline_args(self, testdir):\n p 
= testdir.makepyfile(\"def test_func(): pass\")\n aaa = testdir.mkpydir(\"aaa\")\n bbb = testdir.mkpydir(\"bbb\")\n test_aaa = aaa.join(\"test_aaa.py\")\n p.copy(test_aaa)\n test_bbb = bbb.join(\"test_bbb.py\")\n p.move(test_bbb)\n\n id = \".\"\n\n items, hookrec = testdir.inline_genitems(id)\n assert len(items) == 2\n pprint.pprint(hookrec.calls)\n hookrec.assert_contains(\n [\n (\"pytest_collectstart\", \"collector.fspath == test_aaa\"),\n (\"pytest_pycollect_makeitem\", \"name == 'test_func'\"),\n (\"pytest_collectreport\", \"report.nodeid == 'aaa/test_aaa.py'\"),\n (\"pytest_collectstart\", \"collector.fspath == test_bbb\"),\n (\"pytest_pycollect_makeitem\", \"name == 'test_func'\"),\n (\"pytest_collectreport\", \"report.nodeid == 'bbb/test_bbb.py'\"),\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_serialization_byid_TestSession.test_find_byid_without_instance_parents.assert_x_name_for_x_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestSession.test_serialization_byid_TestSession.test_find_byid_without_instance_parents.assert_x_name_for_x_in_s", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 611, "end_line": 635, "span_ids": ["TestSession.test_find_byid_without_instance_parents", "TestSession.test_serialization_byid"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSession(object):\n\n def test_serialization_byid(self, testdir):\n testdir.makepyfile(\"def test_func(): pass\")\n items, hookrec = testdir.inline_genitems()\n assert len(items) == 1\n item, = items\n items2, hookrec = testdir.inline_genitems(item.nodeid)\n item2, = items2\n assert item2.name == item.name\n assert item2.fspath == item.fspath\n\n def test_find_byid_without_instance_parents(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n arg = p.basename + \"::TestClass::test_method\"\n items, hookrec = testdir.inline_genitems(arg)\n assert len(items) == 1\n item, = items\n assert item.nodeid.endswith(\"TestClass::test_method\")\n # ensure we are reporting the collection of the single test item (#2464)\n assert [x.name for x in self.get_reported_items(hookrec)] == [\"test_method\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_getinitialnodes_Test_getinitialnodes.test_pkgfile.for_col_in_col_listchain_.assert_col_config_is_conf": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_getinitialnodes_Test_getinitialnodes.test_pkgfile.for_col_in_col_listchain_.assert_col_config_is_conf", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": 
"text/x-python", "category": "test", "start_line": 638, "end_line": 669, "span_ids": ["Test_getinitialnodes.test_global_file", "Test_getinitialnodes", "Test_getinitialnodes.test_pkgfile"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Test_getinitialnodes(object):\n def test_global_file(self, testdir, tmpdir):\n x = tmpdir.ensure(\"x.py\")\n with tmpdir.as_cwd():\n config = testdir.parseconfigure(x)\n col = testdir.getnode(config, x)\n assert isinstance(col, pytest.Module)\n assert col.name == \"x.py\"\n assert col.parent.parent is None\n for col in col.listchain():\n assert col.config is config\n\n def test_pkgfile(self, testdir):\n \"\"\"Verify nesting when a module is within a package.\n The parent chain should match: Module -> Package -> Session.\n Session's parent should always be None.\n \"\"\"\n tmpdir = testdir.tmpdir\n subdir = tmpdir.join(\"subdir\")\n x = subdir.ensure(\"x.py\")\n subdir.ensure(\"__init__.py\")\n with subdir.as_cwd():\n config = testdir.parseconfigure(x)\n col = testdir.getnode(config, x)\n assert col.name == \"x.py\"\n assert isinstance(col, pytest.Module)\n assert isinstance(col.parent, pytest.Package)\n assert isinstance(col.parent.parent, pytest.Session)\n # session is batman (has no parents)\n assert col.parent.parent.parent is None\n for col in col.listchain():\n assert col.config is config", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems_Test_genitems.test_check_collect_hashes.for_numi_i_in_enumerate_.for_numj_j_in_enumerate_.if_numj_numi_.assert_i_j": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems_Test_genitems.test_check_collect_hashes.for_numi_i_in_enumerate_.for_numj_j_in_enumerate_.if_numj_numi_.assert_i_j", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 672, "end_line": 690, "span_ids": ["Test_genitems", "Test_genitems.test_check_collect_hashes"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Test_genitems(object):\n def test_check_collect_hashes(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_1():\n pass\n\n def test_2():\n pass\n \"\"\"\n )\n p.copy(p.dirpath(p.purebasename + \"2\" + \".py\"))\n items, reprec = testdir.inline_genitems(p.dirpath())\n assert len(items) == 4\n for numi, i in enumerate(items):\n for numj, j in enumerate(items):\n if numj != numi:\n assert hash(i) != hash(j)\n assert i != j", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_example_items1_Test_genitems.test_example_items1.print_s_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_example_items1_Test_genitems.test_example_items1.print_s_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 692, "end_line": 719, "span_ids": ["Test_genitems.test_example_items1"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Test_genitems(object):\n\n def test_example_items1(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def testone():\n pass\n\n class TestX(object):\n def testmethod_one(self):\n pass\n\n class TestY(TestX):\n pass\n \"\"\"\n )\n items, reprec = testdir.inline_genitems(p)\n assert len(items) == 3\n assert items[0].name == \"testone\"\n assert items[1].name == \"testmethod_one\"\n assert items[2].name == \"testmethod_one\"\n\n # let's also test getmodpath here\n assert items[0].getmodpath() == \"testone\"\n assert items[1].getmodpath() == \"TestX.testmethod_one\"\n assert items[2].getmodpath() == \"TestY.testmethod_one\"\n\n s = items[0].getmodpath(stopatmodule=False)\n assert s.endswith(\"test_example_items1.testone\")\n print(s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_class_and_functions_discovery_using_glob_Test_genitems.test_class_and_functions_discovery_using_glob.assert_ids_MyTestSui": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_Test_genitems.test_class_and_functions_discovery_using_glob_Test_genitems.test_class_and_functions_discovery_using_glob.assert_ids_MyTestSui", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 721, "end_line": 746, "span_ids": ["Test_genitems.test_class_and_functions_discovery_using_glob"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class Test_genitems(object):\n\n def test_class_and_functions_discovery_using_glob(self, testdir):\n \"\"\"\n tests that python_classes and python_functions config options work\n as prefixes and glob-like patterns (issue #600).\n \"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n python_classes = *Suite Test\n python_functions = *_test test\n \"\"\"\n )\n p = testdir.makepyfile(\n \"\"\"\n class MyTestSuite(object):\n def x_test(self):\n pass\n\n class TestCase(object):\n def test_y(self):\n pass\n \"\"\"\n )\n items, reprec = testdir.inline_genitems(p)\n ids = [x.getmodpath() for x in items]\n assert ids == [\"MyTestSuite.x_test\", 
\"TestCase.test_y\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_matchnodes_two_collections_same_file_test_matchnodes_two_collections_same_file.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_matchnodes_two_collections_same_file_test_matchnodes_two_collections_same_file.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 749, "end_line": 782, "span_ids": ["test_matchnodes_two_collections_same_file"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_matchnodes_two_collections_same_file(testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_configure(config):\n config.pluginmanager.register(Plugin2())\n\n class Plugin2(object):\n def pytest_collect_file(self, path, parent):\n if path.ext == \".abc\":\n return MyFile2(path, parent)\n\n def pytest_collect_file(path, parent):\n if path.ext == \".abc\":\n return MyFile1(path, parent)\n\n class MyFile1(pytest.Item, pytest.File):\n def runtest(self):\n pass\n class MyFile2(pytest.File):\n def collect(self):\n return [Item2(\"hello\", parent=self)]\n\n class Item2(pytest.Item):\n def runtest(self):\n pass\n \"\"\"\n )\n p = testdir.makefile(\".abc\", \"\")\n result = testdir.runpytest()\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*2 passed*\"])\n res = testdir.runpytest(\"%s::hello\" % p.basename)\n res.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestNodekeywords_TestNodekeywords.test_issue345.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_TestNodekeywords_TestNodekeywords.test_issue345.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 785, "end_line": 810, "span_ids": ["TestNodekeywords.test_issue345", "TestNodekeywords", "TestNodekeywords.test_no_under"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNodekeywords(object):\n def test_no_under(self, testdir):\n modcol = testdir.getmodulecol(\n \"\"\"\n def test_pass(): pass\n def test_fail(): assert 0\n \"\"\"\n )\n values = list(modcol.keywords)\n assert modcol.name in values\n for x in values:\n assert not x.startswith(\"_\")\n assert modcol.name in 
repr(modcol.keywords)\n\n def test_issue345(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_should_not_be_selected():\n assert False, 'I should not have been selected to run'\n\n def test___repr__():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(\"-k repr\")\n reprec.assertoutcome(passed=1, failed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_COLLECTION_ERROR_PY_FILES_COLLECTION_ERROR_PY_FILES.dict_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_COLLECTION_ERROR_PY_FILES_COLLECTION_ERROR_PY_FILES.dict_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 813, "end_line": 832, "span_ids": ["impl"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "COLLECTION_ERROR_PY_FILES = dict(\n test_01_failure=\"\"\"\n def test_1():\n assert False\n \"\"\",\n test_02_import_error=\"\"\"\n import asdfasdfasdf\n def test_2():\n assert True\n \"\"\",\n test_03_import_error=\"\"\"\n import asdfasdfasdf\n def test_3():\n assert True\n \"\"\",\n test_04_success=\"\"\"\n def test_4():\n assert True\n \"\"\",\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_error_test_exit_on_collection_error.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_error_test_exit_on_collection_error.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 835, "end_line": 850, "span_ids": ["test_exit_on_collection_error"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exit_on_collection_error(testdir):\n \"\"\"Verify that all collection errors are collected and no tests executed\"\"\"\n testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)\n\n res = testdir.runpytest()\n assert res.ret == 2\n\n res.stdout.fnmatch_lines(\n [\n \"collected 2 items / 2 errors\",\n \"*ERROR collecting test_02_import_error.py*\",\n \"*No module named *asdfa*\",\n \"*ERROR collecting test_03_import_error.py*\",\n \"*No module named *asdfa*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_smaller_than_n_errors_test_exit_on_collection_with_maxfail_smaller_than_n_errors.assert_test_03_not_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_smaller_than_n_errors_test_exit_on_collection_with_maxfail_smaller_than_n_errors.assert_test_03_not_in_r", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 853, "end_line": 867, "span_ids": ["test_exit_on_collection_with_maxfail_smaller_than_n_errors"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):\n \"\"\"\n Verify collection is aborted once maxfail errors are encountered ignoring\n further modules which would cause more collection errors.\n \"\"\"\n testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)\n\n res = testdir.runpytest(\"--maxfail=1\")\n assert res.ret == 1\n\n res.stdout.fnmatch_lines(\n [\"*ERROR collecting test_02_import_error.py*\", \"*No module named *asdfa*\"]\n )\n\n assert \"test_03\" not in res.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_bigger_than_n_errors_test_continue_on_collection_errors.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_exit_on_collection_with_maxfail_bigger_than_n_errors_test_continue_on_collection_errors.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 870, "end_line": 903, "span_ids": ["test_continue_on_collection_errors", "test_exit_on_collection_with_maxfail_bigger_than_n_errors"], "tokens": 251}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):\n \"\"\"\n Verify the test run aborts due to collection errors even if maxfail count of\n errors was not reached.\n \"\"\"\n testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)\n\n res = testdir.runpytest(\"--maxfail=4\")\n assert res.ret == 2\n\n res.stdout.fnmatch_lines(\n [\n \"collected 2 items / 2 errors\",\n \"*ERROR collecting test_02_import_error.py*\",\n \"*No module named *asdfa*\",\n \"*ERROR collecting test_03_import_error.py*\",\n \"*No module named *asdfa*\",\n ]\n )\n\n\ndef test_continue_on_collection_errors(testdir):\n \"\"\"\n Verify tests are executed even when collection errors occur when the\n --continue-on-collection-errors flag is set\n \"\"\"\n 
testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)\n\n res = testdir.runpytest(\"--continue-on-collection-errors\")\n assert res.ret == 1\n\n res.stdout.fnmatch_lines(\n [\"collected 2 items / 2 errors\", \"*1 failed, 1 passed, 2 error*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_continue_on_collection_errors_maxfail_test_continue_on_collection_errors_maxfail.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_continue_on_collection_errors_maxfail_test_continue_on_collection_errors_maxfail.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 906, "end_line": 919, "span_ids": ["test_continue_on_collection_errors_maxfail"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_continue_on_collection_errors_maxfail(testdir):\n \"\"\"\n Verify tests are executed even when collection errors occur and that maxfail\n is honoured (including the collection error count).\n 4 tests: 2 collection errors + 1 failure + 1 success\n test_4 is never executed because the test run is with --maxfail=3 which\n means it is interrupted after the 2 collection errors + 1 failure.\n \"\"\"\n testdir.makepyfile(**COLLECTION_ERROR_PY_FILES)\n\n res = testdir.runpytest(\"--continue-on-collection-errors\", \"--maxfail=3\")\n assert res.ret == 1\n\n res.stdout.fnmatch_lines([\"collected 2 items / 2 errors\", \"*1 failed, 2 error*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_fixture_scope_sibling_conftests_test_fixture_scope_sibling_conftests.res_stdout_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_fixture_scope_sibling_conftests_test_fixture_scope_sibling_conftests.res_stdout_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 922, "end_line": 950, "span_ids": ["test_fixture_scope_sibling_conftests"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_scope_sibling_conftests(testdir):\n \"\"\"Regression test case for https://github.com/pytest-dev/pytest/issues/2836\"\"\"\n foo_path = testdir.mkdir(\"foo\")\n foo_path.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def fix():\n return 1\n \"\"\"\n )\n )\n 
foo_path.join(\"test_foo.py\").write(\"def test_foo(fix): assert fix == 1\")\n\n # Tests in `food/` should not see the conftest fixture from `foo/`\n food_path = testdir.mkpydir(\"food\")\n food_path.join(\"test_food.py\").write(\"def test_food(fix): assert fix == 1\")\n\n res = testdir.runpytest()\n assert res.ret == 1\n\n res.stdout.fnmatch_lines(\n [\n \"*ERROR at setup of test_food*\",\n \"E*fixture 'fix' not found\",\n \"*1 passed, 1 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_init_tests_test_collect_init_tests.assert_test_foo_not_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_init_tests_test_collect_init_tests.assert_test_foo_not_in_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 953, "end_line": 1011, "span_ids": ["test_collect_init_tests"], "tokens": 512}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_init_tests(testdir):\n \"\"\"Check that we collect files from __init__.py files when they patch the 'python_files' (#3773)\"\"\"\n p = testdir.copy_example(\"collect/collect_init_tests\")\n result = testdir.runpytest(p, \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\n \"collected 2 items\",\n \"\",\n \" \",\n \" \",\n \" \",\n ]\n )\n result = testdir.runpytest(\"./tests\", \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\n \"collected 2 items\",\n \"\",\n \" \",\n \" \",\n \" \",\n ]\n )\n # Ignores duplicates with \".\" and pkginit (#4310).\n result = testdir.runpytest(\"./tests\", \".\", \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\n \"collected 2 items\",\n \"\",\n \" \",\n \" \",\n \" \",\n \" \",\n ]\n )\n # Same as before, but different order.\n result = testdir.runpytest(\".\", \"tests\", \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\n \"collected 2 items\",\n \"\",\n \" \",\n \" \",\n \" \",\n \" \",\n ]\n )\n result = testdir.runpytest(\"./tests/test_foo.py\", \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\"\", \" \", \" \"]\n )\n assert \"test_init\" not in result.stdout.str()\n result = testdir.runpytest(\"./tests/__init__.py\", \"--collect-only\")\n result.stdout.fnmatch_lines(\n [\"\", \" \", \" \"]\n )\n assert \"test_foo\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_invalid_signature_message_test_collect_handles_raising_on_dunder_class.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_invalid_signature_message_test_collect_handles_raising_on_dunder_class.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1014, "end_line": 1058, "span_ids": ["test_collect_handles_raising_on_dunder_class", "test_collect_invalid_signature_message"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_invalid_signature_message(testdir):\n \"\"\"Check that we issue a proper message when we can't determine the signature of a test\n function (#4026).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestCase:\n @pytest.fixture\n def fix():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"Could not determine arguments of *.fix *: invalid method signature\"]\n )\n\n\ndef test_collect_handles_raising_on_dunder_class(testdir):\n \"\"\"Handle proxy classes like Django's LazySettings that might raise on\n ``isinstance`` (#4266).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n class ImproperlyConfigured(Exception):\n pass\n\n class RaisesOnGetAttr(object):\n def raises(self):\n raise ImproperlyConfigured\n\n __class__ = property(raises)\n\n raises = RaisesOnGetAttr()\n\n\n def test_1():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed in*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_with_chdir_during_import_test_collect_with_chdir_during_import.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_with_chdir_during_import_test_collect_with_chdir_during_import.None_4", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1061, "end_line": 1094, "span_ids": ["test_collect_with_chdir_during_import"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_with_chdir_during_import(testdir):\n subdir = testdir.tmpdir.mkdir(\"sub\")\n testdir.tmpdir.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\n import os\n os.chdir(%r)\n \"\"\"\n % (str(subdir),)\n )\n )\n testdir.makepyfile(\n \"\"\"\n def test_1():\n import os\n assert os.getcwd() == %r\n \"\"\"\n % (str(subdir),)\n )\n with testdir.tmpdir.as_cwd():\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed in*\"])\n assert result.ret == 0\n\n # Handles relative testpaths.\n testdir.makeini(\n \"\"\"\n [pytest]\n testpaths = .\n \"\"\"\n )\n with testdir.tmpdir.as_cwd():\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"collected 1 item\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pyargs_with_testpaths_test_collect_pyargs_with_testpaths.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pyargs_with_testpaths_test_collect_pyargs_with_testpaths.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1097, "end_line": 1116, "span_ids": ["test_collect_pyargs_with_testpaths"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_pyargs_with_testpaths(testdir, monkeypatch):\n testmod = testdir.mkdir(\"testmod\")\n # NOTE: __init__.py is not collected since it does not match python_files.\n testmod.ensure(\"__init__.py\").write(\"def test_func(): pass\")\n testmod.ensure(\"test_file.py\").write(\"def test_func(): pass\")\n\n root = testdir.mkdir(\"root\")\n root.ensure(\"pytest.ini\").write(\n textwrap.dedent(\n \"\"\"\n [pytest]\n addopts = --pyargs\n testpaths = testmod\n \"\"\"\n )\n )\n monkeypatch.setenv(\"PYTHONPATH\", str(testdir.tmpdir), prepend=os.pathsep)\n with root.as_cwd():\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*1 passed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_file_arg_test_collect_symlink_file_arg.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_file_arg_test_collect_symlink_file_arg.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1119, "end_line": 1135, "span_ids": ["test_collect_symlink_file_arg"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n)\ndef test_collect_symlink_file_arg(testdir):\n \"\"\"Test that collecting a direct symlink, where the target does not match python_files works (#4325).\"\"\"\n real = testdir.makepyfile(\n real=\"\"\"\n def test_nodeid(request):\n assert request.node.nodeid == \"real.py::test_nodeid\"\n \"\"\"\n )\n symlink = testdir.tmpdir.join(\"symlink.py\")\n symlink.mksymlinkto(real)\n result = testdir.runpytest(\"-v\", symlink)\n result.stdout.fnmatch_lines([\"real.py::test_nodeid PASSED*\", \"*1 passed in*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": 
"1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_out_of_tree_test_collect_symlink_out_of_tree.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_symlink_out_of_tree_test_collect_symlink_out_of_tree.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1138, "end_line": 1168, "span_ids": ["test_collect_symlink_out_of_tree"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n)\ndef test_collect_symlink_out_of_tree(testdir):\n \"\"\"Test collection of symlink via out-of-tree rootdir.\"\"\"\n sub = testdir.tmpdir.join(\"sub\")\n real = sub.join(\"test_real.py\")\n real.write(\n textwrap.dedent(\n \"\"\"\n def test_nodeid(request):\n # Should not contain sub/ prefix.\n assert request.node.nodeid == \"test_real.py::test_nodeid\"\n \"\"\"\n ),\n ensure=True,\n )\n\n out_of_tree = testdir.tmpdir.join(\"out_of_tree\").ensure(dir=True)\n symlink_to_sub = out_of_tree.join(\"symlink_to_sub\")\n symlink_to_sub.mksymlinkto(sub)\n sub.chdir()\n result = testdir.runpytest(\"-vs\", \"--rootdir=%s\" % sub, symlink_to_sub)\n result.stdout.fnmatch_lines(\n [\n # Should not contain \"sub/\"!\n \"test_real.py::test_nodeid PASSED\"\n ]\n )\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collectignore_via_conftest_test_collectignore_via_conftest.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collectignore_via_conftest_test_collectignore_via_conftest.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1171, "end_line": 1181, "span_ids": ["test_collectignore_via_conftest"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collectignore_via_conftest(testdir, monkeypatch):\n \"\"\"collect_ignore in parent conftest skips importing child (issue #4592).\"\"\"\n tests = testdir.mkpydir(\"tests\")\n tests.ensure(\"conftest.py\").write(\"collect_ignore = ['ignore_me']\")\n\n ignore_me = tests.mkdir(\"ignore_me\")\n ignore_me.ensure(\"__init__.py\")\n ignore_me.ensure(\"conftest.py\").write(\"assert 0, 'should_not_be_called'\")\n\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pkg_init_and_file_in_args_test_collect_pkg_init_and_file_in_args.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_pkg_init_and_file_in_args_test_collect_pkg_init_and_file_in_args.None_3", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1184, "end_line": 1210, "span_ids": ["test_collect_pkg_init_and_file_in_args"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_pkg_init_and_file_in_args(testdir):\n subdir = testdir.mkdir(\"sub\")\n init = subdir.ensure(\"__init__.py\")\n init.write(\"def test_init(): pass\")\n p = subdir.ensure(\"test_file.py\")\n p.write(\"def test_file(): pass\")\n\n # NOTE: without \"-o python_files=*.py\" this collects test_file.py twice.\n # This changed/broke with \"Add package scoped fixtures #2283\" (2b1410895)\n # initially (causing a RecursionError).\n result = testdir.runpytest(\"-v\", str(init), str(p))\n result.stdout.fnmatch_lines(\n [\n \"sub/test_file.py::test_file PASSED*\",\n \"sub/test_file.py::test_file PASSED*\",\n \"*2 passed in*\",\n ]\n )\n\n result = testdir.runpytest(\"-v\", \"-o\", \"python_files=*.py\", str(init), str(p))\n result.stdout.fnmatch_lines(\n [\n \"sub/__init__.py::test_init PASSED*\",\n \"sub/test_file.py::test_file PASSED*\",\n \"*2 passed in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_sub_with_symlinks_test_collect_sub_with_symlinks.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collect_sub_with_symlinks_test_collect_sub_with_symlinks.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1213, "end_line": 1237, "span_ids": ["test_collect_sub_with_symlinks"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n)\n@pytest.mark.parametrize(\"use_pkg\", (True, False))\ndef test_collect_sub_with_symlinks(use_pkg, testdir):\n sub = testdir.mkdir(\"sub\")\n if use_pkg:\n sub.ensure(\"__init__.py\")\n sub.ensure(\"test_file.py\").write(\"def test_file(): pass\")\n\n # Create a broken symlink.\n sub.join(\"test_broken.py\").mksymlinkto(\"test_doesnotexist.py\")\n\n # Symlink that gets collected.\n 
sub.join(\"test_symlink.py\").mksymlinkto(\"test_file.py\")\n\n result = testdir.runpytest(\"-v\", str(sub))\n result.stdout.fnmatch_lines(\n [\n \"sub/test_file.py::test_file PASSED*\",\n \"sub/test_symlink.py::test_file PASSED*\",\n \"*2 passed in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collector_respects_tbstyle_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_collection.py_test_collector_respects_tbstyle_", "embedding": null, "metadata": {"file_path": "testing/test_collection.py", "file_name": "test_collection.py", "file_type": "text/x-python", "category": "test", "start_line": 1240, "end_line": 1255, "span_ids": ["test_collector_respects_tbstyle"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collector_respects_tbstyle(testdir):\n p1 = testdir.makepyfile(\"assert 0\")\n result = testdir.runpytest(p1, \"--tb=native\")\n assert result.ret == EXIT_INTERRUPTED\n result.stdout.fnmatch_lines(\n [\n \"*_ ERROR collecting test_collector_respects_tbstyle.py _*\",\n \"Traceback (most recent call last):\",\n ' File \"*/test_collector_respects_tbstyle.py\", line 1, in ',\n \" assert 0\",\n \"AssertionError: assert 0\",\n \"*! Interrupted: 1 errors during collection !*\",\n \"*= 1 error in *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_from___future___import_ab_test_is_generator.assert_not_is_generator_f": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_from___future___import_ab_test_is_generator.assert_not_is_generator_f", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 27, "span_ids": ["imports", "test_is_generator"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nfrom functools import wraps\n\nimport six\n\nimport pytest\nfrom _pytest.compat import _PytestWrapper\nfrom _pytest.compat import get_real_func\nfrom _pytest.compat import is_generator\nfrom _pytest.compat import safe_getattr\nfrom _pytest.compat import safe_isclass\nfrom _pytest.outcomes import OutcomeException\n\n\ndef test_is_generator():\n def zap():\n yield # pragma: no cover\n\n def foo():\n pass # pragma: no cover\n\n assert is_generator(zap)\n assert not is_generator(foo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_real_func_loop_limit_test_real_func_loop_limit.with_pytest_raises_.get_real_func_evil_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_real_func_loop_limit_test_real_func_loop_limit.with_pytest_raises_.get_real_func_evil_", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 30, "end_line": 53, "span_ids": ["test_real_func_loop_limit", "test_real_func_loop_limit.Evil", "test_real_func_loop_limit.Evil.__init__"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_real_func_loop_limit():\n class Evil(object):\n def __init__(self):\n self.left = 1000\n\n def __repr__(self):\n return \"\".format(left=self.left)\n\n def __getattr__(self, attr):\n if not self.left:\n raise RuntimeError(\"it's over\") # pragma: no cover\n self.left -= 1\n return self\n\n evil = Evil()\n\n with pytest.raises(\n ValueError,\n match=(\n \"could not find real function of \\n\"\n \"stopped at \"\n ),\n ):\n get_real_func(evil)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_get_real_func_test_get_real_func.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_get_real_func_test_get_real_func.None_2", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 80, "span_ids": ["test_get_real_func"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_get_real_func():\n \"\"\"Check that get_real_func correctly unwraps decorators until reaching the real function\"\"\"\n\n def decorator(f):\n @wraps(f)\n def inner():\n pass # pragma: no cover\n\n if six.PY2:\n inner.__wrapped__ = f\n return inner\n\n def func():\n pass # pragma: no cover\n\n wrapped_func = decorator(decorator(func))\n assert get_real_func(wrapped_func) is func\n\n wrapped_func2 = decorator(decorator(wrapped_func))\n assert get_real_func(wrapped_func2) is func\n\n # special case for __pytest_wrapped__ attribute: used to obtain the function up until the point\n # a function was wrapped by pytest itself\n wrapped_func2.__pytest_wrapped__ = _PytestWrapper(wrapped_func)\n assert get_real_func(wrapped_func2) is wrapped_func", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_asyncio_test_is_generator_asyncio.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_asyncio_test_is_generator_asyncio.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 83, "end_line": 102, "span_ids": ["test_is_generator_asyncio"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.version_info < (3, 4), reason=\"asyncio available in Python 3.4+\"\n)\ndef test_is_generator_asyncio(testdir):\n testdir.makepyfile(\n \"\"\"\n from _pytest.compat import is_generator\n import asyncio\n @asyncio.coroutine\n def baz():\n yield from [1,2,3]\n\n def test_is_generator_asyncio():\n assert not is_generator(baz)\n \"\"\"\n )\n # avoid importing asyncio into pytest's own process,\n # which in turn imports logging (#8)\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_async_syntax_test_is_generator_async_syntax.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_test_is_generator_async_syntax_test_is_generator_async_syntax.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 124, "span_ids": ["test_is_generator_async_syntax"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.version_info < (3, 5), reason=\"async syntax available in Python 3.5+\"\n)\ndef test_is_generator_async_syntax(testdir):\n testdir.makepyfile(\n \"\"\"\n from _pytest.compat import is_generator\n def test_is_generator_py35():\n async def foo():\n await foo()\n\n async def bar():\n pass\n\n assert not is_generator(foo)\n assert not is_generator(bar)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_ErrorsHelper_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_compat.py_ErrorsHelper_", "embedding": null, "metadata": {"file_path": "testing/test_compat.py", "file_name": "test_compat.py", "file_type": "text/x-python", "category": "test", "start_line": 127, "end_line": 160, 
"span_ids": ["test_safe_isclass.CrappyClass", "test_safe_getattr", "test_safe_isclass.CrappyClass.__class__", "ErrorsHelper.raise_exception", "ErrorsHelper.raise_fail", "test_safe_isclass", "ErrorsHelper", "test_helper_failures"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class ErrorsHelper(object):\n @property\n def raise_exception(self):\n raise Exception(\"exception should be catched\")\n\n @property\n def raise_fail(self):\n pytest.fail(\"fail should be catched\")\n\n\ndef test_helper_failures():\n helper = ErrorsHelper()\n with pytest.raises(Exception):\n helper.raise_exception\n with pytest.raises(OutcomeException):\n helper.raise_fail\n\n\ndef test_safe_getattr():\n helper = ErrorsHelper()\n assert safe_getattr(helper, \"raise_exception\", \"default\") == \"default\"\n assert safe_getattr(helper, \"raise_fail\", \"default\") == \"default\"\n\n\ndef test_safe_isclass():\n assert safe_isclass(type) is True\n\n class CrappyClass(Exception):\n @property\n def __class__(self):\n assert False, \"Should be ignored\"\n\n assert safe_isclass(CrappyClass()) is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_from___future___import_ab_None_16": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_from___future___import_ab_None_16", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 20, "span_ids": ["imports"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\nimport textwrap\n\nimport attr\n\nimport _pytest._code\nimport pytest\nfrom _pytest.config import _iter_rewritable_modules\nfrom _pytest.config.exceptions import UsageError\nfrom _pytest.config.findpaths import determine_setup\nfrom _pytest.config.findpaths import get_common_ancestor\nfrom _pytest.config.findpaths import getcfg\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import EXIT_OK\nfrom _pytest.main import EXIT_TESTSFAILED\nfrom _pytest.main import EXIT_USAGEERROR", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni_TestParseIni.test_getcfg_and_config.assert_config_inicfg_nam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni_TestParseIni.test_getcfg_and_config.assert_config_inicfg_nam", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", 
"file_type": "text/x-python", "category": "test", "start_line": 23, "end_line": 43, "span_ids": ["TestParseIni", "TestParseIni.test_getcfg_and_config"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParseIni(object):\n @pytest.mark.parametrize(\n \"section, filename\", [(\"pytest\", \"pytest.ini\"), (\"tool:pytest\", \"setup.cfg\")]\n )\n def test_getcfg_and_config(self, testdir, tmpdir, section, filename):\n sub = tmpdir.mkdir(\"sub\")\n sub.chdir()\n tmpdir.join(filename).write(\n textwrap.dedent(\n \"\"\"\\\n [{section}]\n name = value\n \"\"\".format(\n section=section\n )\n )\n )\n rootdir, inifile, cfg = getcfg([sub])\n assert cfg[\"name\"] == \"value\"\n config = testdir.parseconfigure(sub)\n assert config.inicfg[\"name\"] == \"value\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_getcfg_empty_path_TestParseIni.test_setupcfg_uses_toolpytest_with_pytest.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_getcfg_empty_path_TestParseIni.test_setupcfg_uses_toolpytest_with_pytest.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 63, "span_ids": ["TestParseIni.test_setupcfg_uses_toolpytest_with_pytest", "TestParseIni.test_getcfg_empty_path"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParseIni(object):\n\n def test_getcfg_empty_path(self):\n \"\"\"correctly handle zero length arguments (a la pytest '')\"\"\"\n getcfg([\"\"])\n\n def test_setupcfg_uses_toolpytest_with_pytest(self, testdir):\n p1 = testdir.makepyfile(\"def test(): pass\")\n testdir.makefile(\n \".cfg\",\n setup=\"\"\"\n [tool:pytest]\n testpaths=%s\n [pytest]\n testpaths=ignored\n \"\"\"\n % p1.basename,\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*, inifile: setup.cfg, *\", \"* 1 passed in *\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_append_parse_args_TestParseIni.test_tox_ini_wrong_version.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_append_parse_args_TestParseIni.test_tox_ini_wrong_version.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 91, "span_ids": 
["TestParseIni.test_tox_ini_wrong_version", "TestParseIni.test_append_parse_args"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParseIni(object):\n\n def test_append_parse_args(self, testdir, tmpdir, monkeypatch):\n monkeypatch.setenv(\"PYTEST_ADDOPTS\", '--color no -rs --tb=\"short\"')\n tmpdir.join(\"pytest.ini\").write(\n textwrap.dedent(\n \"\"\"\\\n [pytest]\n addopts = --verbose\n \"\"\"\n )\n )\n config = testdir.parseconfig(tmpdir)\n assert config.option.color == \"no\"\n assert config.option.reportchars == \"s\"\n assert config.option.tbstyle == \"short\"\n assert config.option.verbose\n\n def test_tox_ini_wrong_version(self, testdir):\n testdir.makefile(\n \".ini\",\n tox=\"\"\"\n [pytest]\n minversion=9.0\n \"\"\",\n )\n result = testdir.runpytest()\n assert result.ret != 0\n result.stderr.fnmatch_lines([\"*tox.ini:2*requires*9.0*actual*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_ini_names_TestParseIni.test_ini_names.assert_config_getini_min": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_ini_names_TestParseIni.test_ini_names.assert_config_getini_min", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 93, "end_line": 109, "span_ids": ["TestParseIni.test_ini_names"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParseIni(object):\n\n @pytest.mark.parametrize(\n \"section, name\",\n [(\"tool:pytest\", \"setup.cfg\"), (\"pytest\", \"tox.ini\"), (\"pytest\", \"pytest.ini\")],\n )\n def test_ini_names(self, testdir, name, section):\n testdir.tmpdir.join(name).write(\n textwrap.dedent(\n \"\"\"\n [{section}]\n minversion = 1.0\n \"\"\".format(\n section=section\n )\n )\n )\n config = testdir.parseconfig()\n assert config.getini(\"minversion\") == \"1.0\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_toxini_before_lower_pytestini_TestParseIni.test_confcutdir.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestParseIni.test_toxini_before_lower_pytestini_TestParseIni.test_confcutdir.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 111, "end_line": 143, "span_ids": ["TestParseIni.test_confcutdir", "TestParseIni.test_toxini_before_lower_pytestini"], "tokens": 216}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParseIni(object):\n\n def test_toxini_before_lower_pytestini(self, testdir):\n sub = testdir.tmpdir.mkdir(\"sub\")\n sub.join(\"tox.ini\").write(\n textwrap.dedent(\n \"\"\"\n [pytest]\n minversion = 2.0\n \"\"\"\n )\n )\n testdir.tmpdir.join(\"pytest.ini\").write(\n textwrap.dedent(\n \"\"\"\n [pytest]\n minversion = 1.5\n \"\"\"\n )\n )\n config = testdir.parseconfigure(sub)\n assert config.getini(\"minversion\") == \"2.0\"\n\n @pytest.mark.xfail(reason=\"probably not needed\")\n def test_confcutdir(self, testdir):\n sub = testdir.mkdir(\"sub\")\n sub.chdir()\n testdir.makeini(\n \"\"\"\n [pytest]\n addopts = --qwe\n \"\"\"\n )\n result = testdir.inline_run(\"--confcutdir=.\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigCmdlineParsing_TestConfigCmdlineParsing.test_absolute_win32_path.assert_ret__pytest_mai": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigCmdlineParsing_TestConfigCmdlineParsing.test_absolute_win32_path.assert_ret__pytest_mai", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 146, "end_line": 196, "span_ids": ["TestConfigCmdlineParsing.test_explicitly_specified_config_file_is_loaded", "TestConfigCmdlineParsing", "TestConfigCmdlineParsing.test_parsing_again_fails", "TestConfigCmdlineParsing.test_absolute_win32_path"], "tokens": 320}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigCmdlineParsing(object):\n def test_parsing_again_fails(self, testdir):\n config = testdir.parseconfig()\n pytest.raises(AssertionError, lambda: config.parse([]))\n\n def test_explicitly_specified_config_file_is_loaded(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"custom\", \"\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n custom = 0\n \"\"\"\n )\n testdir.makefile(\n \".ini\",\n custom=\"\"\"\n [pytest]\n custom = 1\n \"\"\",\n )\n config = testdir.parseconfig(\"-c\", \"custom.ini\")\n assert config.getini(\"custom\") == \"1\"\n\n testdir.makefile(\n \".cfg\",\n custom_tool_pytest_section=\"\"\"\n [tool:pytest]\n custom = 1\n \"\"\",\n )\n config = testdir.parseconfig(\"-c\", \"custom_tool_pytest_section.cfg\")\n assert config.getini(\"custom\") == \"1\"\n\n def test_absolute_win32_path(self, testdir):\n temp_ini_file = testdir.makefile(\n \".ini\",\n custom=\"\"\"\n [pytest]\n addopts = --version\n \"\"\",\n )\n from os.path import normpath\n\n temp_ini_file = normpath(str(temp_ini_file))\n ret = pytest.main([\"-c\", temp_ini_file])\n assert ret == _pytest.main.EXIT_OK", "start_char_idx": null, "end_char_idx": 
null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI_TestConfigAPI.test_getoption.assert_config_getoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI_TestConfigAPI.test_getoption.assert_config_getoption_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 199, "end_line": 254, "span_ids": ["TestConfigAPI.test_config_getoption", "TestConfigAPI.test_config_getoption_unicode", "TestConfigAPI.test_config_trace", "TestConfigAPI.test_config_getvalueorskip_None", "TestConfigAPI.test_getoption", "TestConfigAPI", "TestConfigAPI.test_config_getvalueorskip"], "tokens": 420}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n def test_config_trace(self, testdir):\n config = testdir.parseconfig()\n values = []\n config.trace.root.setwriter(values.append)\n config.trace(\"hello\")\n assert len(values) == 1\n assert values[0] == \"hello [config]\\n\"\n\n def test_config_getoption(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addoption(\"--hello\", \"-X\", dest=\"hello\")\n \"\"\"\n )\n config = testdir.parseconfig(\"--hello=this\")\n for x in (\"hello\", \"--hello\", \"-X\"):\n assert config.getoption(x) == \"this\"\n pytest.raises(ValueError, config.getoption, \"qweqwe\")\n\n @pytest.mark.skipif(\"sys.version_info[0] < 3\")\n def test_config_getoption_unicode(self, testdir):\n testdir.makeconftest(\n \"\"\"\n from __future__ import unicode_literals\n\n def pytest_addoption(parser):\n parser.addoption('--hello', type=str)\n \"\"\"\n )\n config = testdir.parseconfig(\"--hello=this\")\n assert config.getoption(\"hello\") == \"this\"\n\n def test_config_getvalueorskip(self, testdir):\n config = testdir.parseconfig()\n pytest.raises(pytest.skip.Exception, config.getvalueorskip, \"hello\")\n verbose = config.getvalueorskip(\"verbose\")\n assert verbose == config.option.verbose\n\n def test_config_getvalueorskip_None(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addoption(\"--hello\")\n \"\"\"\n )\n config = testdir.parseconfig()\n with pytest.raises(pytest.skip.Exception):\n config.getvalueorskip(\"hello\")\n\n def test_getoption(self, testdir):\n config = testdir.parseconfig()\n with pytest.raises(ValueError):\n config.getvalue(\"x\")\n assert config.getoption(\"x\", 1) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_getconftest_pathlist_TestConfigAPI.test_addini.pytest_raises_ValueError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_getconftest_pathlist_TestConfigAPI.test_addini.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", 
"file_type": "text/x-python", "category": "test", "start_line": 256, "end_line": 284, "span_ids": ["TestConfigAPI.test_getconftest_pathlist", "TestConfigAPI.test_addini"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_getconftest_pathlist(self, testdir, tmpdir):\n somepath = tmpdir.join(\"x\", \"y\", \"z\")\n p = tmpdir.join(\"conftest.py\")\n p.write(\"pathlist = ['.', %r]\" % str(somepath))\n config = testdir.parseconfigure(p)\n assert config._getconftest_pathlist(\"notexist\", path=tmpdir) is None\n pl = config._getconftest_pathlist(\"pathlist\", path=tmpdir)\n print(pl)\n assert len(pl) == 2\n assert pl[0] == tmpdir\n assert pl[1] == somepath\n\n def test_addini(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"myname\", \"my new ini value\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n myname=hello\n \"\"\"\n )\n config = testdir.parseconfig()\n val = config.getini(\"myname\")\n assert val == \"hello\"\n pytest.raises(ValueError, config.getini, \"other\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_pathlist_TestConfigAPI.test_addini_pathlist.pytest_raises_ValueError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_pathlist_TestConfigAPI.test_addini_pathlist.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 286, "end_line": 305, "span_ids": ["TestConfigAPI.test_addini_pathlist"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_addini_pathlist(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"paths\", \"my new ini value\", type=\"pathlist\")\n parser.addini(\"abc\", \"abc value\")\n \"\"\"\n )\n p = testdir.makeini(\n \"\"\"\n [pytest]\n paths=hello world/sub.py\n \"\"\"\n )\n config = testdir.parseconfig()\n values = config.getini(\"paths\")\n assert len(values) == 2\n assert values[0] == p.dirpath(\"hello\")\n assert values[1] == p.dirpath(\"world/sub.py\")\n pytest.raises(ValueError, config.getini, \"other\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_args_TestConfigAPI.test_addini_args.assert_values_list_12": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_args_TestConfigAPI.test_addini_args.assert_values_list_12", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 326, "span_ids": ["TestConfigAPI.test_addini_args"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_addini_args(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"args\", \"new args\", type=\"args\")\n parser.addini(\"a2\", \"\", \"args\", default=\"1 2 3\".split())\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n args=123 \"123 hello\" \"this\"\n \"\"\"\n )\n config = testdir.parseconfig()\n values = config.getini(\"args\")\n assert len(values) == 3\n assert values == [\"123\", \"123 hello\", \"this\"]\n values = config.getini(\"a2\")\n assert values == list(\"123\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_linelist_TestConfigAPI.test_addini_linelist.assert_values_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_linelist_TestConfigAPI.test_addini_linelist.assert_values_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 328, "end_line": 348, "span_ids": ["TestConfigAPI.test_addini_linelist"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_addini_linelist(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"xy\", \"\", type=\"linelist\")\n parser.addini(\"a2\", \"\", \"linelist\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n xy= 123 345\n second line\n \"\"\"\n )\n config = testdir.parseconfig()\n values = config.getini(\"xy\")\n assert len(values) == 2\n assert values == [\"123 345\", \"second line\"]\n values = config.getini(\"a2\")\n assert values == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_bool_TestConfigAPI.test_addini_bool.assert_config_getini_str": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addini_bool_TestConfigAPI.test_addini_bool.assert_config_getini_str", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": 
"text/x-python", "category": "test", "start_line": 350, "end_line": 369, "span_ids": ["TestConfigAPI.test_addini_bool"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n @pytest.mark.parametrize(\n \"str_val, bool_val\", [(\"True\", True), (\"no\", False), (\"no-ini\", True)]\n )\n def test_addini_bool(self, testdir, str_val, bool_val):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"strip\", \"\", type=\"bool\", default=True)\n \"\"\"\n )\n if str_val != \"no-ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n strip=%s\n \"\"\"\n % str_val\n )\n config = testdir.parseconfig()\n assert config.getini(\"strip\") is bool_val", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_existing_TestConfigAPI.test_addinivalue_line_existing.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_existing_TestConfigAPI.test_addinivalue_line_existing.None_3", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 371, "end_line": 391, "span_ids": ["TestConfigAPI.test_addinivalue_line_existing"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_addinivalue_line_existing(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"xy\", \"\", type=\"linelist\")\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n xy= 123\n \"\"\"\n )\n config = testdir.parseconfig()\n values = config.getini(\"xy\")\n assert len(values) == 1\n assert values == [\"123\"]\n config.addinivalue_line(\"xy\", \"456\")\n values = config.getini(\"xy\")\n assert len(values) == 2\n assert values == [\"123\", \"456\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_new_TestConfigAPI.test_addinivalue_line_new.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_addinivalue_line_new_TestConfigAPI.test_addinivalue_line_new.None_4", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 409, "span_ids": ["TestConfigAPI.test_addinivalue_line_new"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_addinivalue_line_new(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"xy\", \"\", type=\"linelist\")\n \"\"\"\n )\n config = testdir.parseconfig()\n assert not config.getini(\"xy\")\n config.addinivalue_line(\"xy\", \"456\")\n values = config.getini(\"xy\")\n assert len(values) == 1\n assert values == [\"456\"]\n config.addinivalue_line(\"xy\", \"123\")\n values = config.getini(\"xy\")\n assert len(values) == 2\n assert values == [\"456\", \"123\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_confcutdir_check_isdir_TestConfigAPI.test_confcutdir_check_isdir.assert_config_getoption_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_confcutdir_check_isdir_TestConfigAPI.test_confcutdir_check_isdir.assert_config_getoption_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 411, "end_line": 422, "span_ids": ["TestConfigAPI.test_confcutdir_check_isdir"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n def test_confcutdir_check_isdir(self, testdir):\n \"\"\"Give an error if --confcutdir is not a valid directory (#2078)\"\"\"\n with pytest.raises(pytest.UsageError):\n testdir.parseconfig(\n \"--confcutdir\", testdir.tmpdir.join(\"file\").ensure(file=1)\n )\n with pytest.raises(pytest.UsageError):\n testdir.parseconfig(\"--confcutdir\", testdir.tmpdir.join(\"inexistant\"))\n config = testdir.parseconfig(\n \"--confcutdir\", testdir.tmpdir.join(\"dir\").ensure(dir=1)\n )\n assert config.getoption(\"confcutdir\") == str(testdir.tmpdir.join(\"dir\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_iter_rewritable_modules_TestConfigAPI.test_iter_rewritable_modules.assert_list__iter_rewrita": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigAPI.test_iter_rewritable_modules_TestConfigAPI.test_iter_rewritable_modules.assert_list__iter_rewrita", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 435, "span_ids": ["TestConfigAPI.test_iter_rewritable_modules"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigAPI(object):\n\n @pytest.mark.parametrize(\n \"names, expected\",\n [\n ([\"bar.py\"], [\"bar\"]),\n ([\"foo\", \"bar.py\"], []),\n ([\"foo\", \"bar.pyc\"], []),\n ([\"foo\", \"__init__.py\"], [\"foo\"]),\n ([\"foo\", \"bar\", \"__init__.py\"], []),\n ],\n )\n def test_iter_rewritable_modules(self, names, expected):\n assert list(_iter_rewritable_modules([\"/\".join(names)])) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs_TestConfigFromdictargs.test_origargs.assert_config_option_capt": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs_TestConfigFromdictargs.test_origargs.assert_config_option_capt", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 464, "span_ids": ["TestConfigFromdictargs", "TestConfigFromdictargs.test_basic_behavior", "TestConfigFromdictargs.test_origargs"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigFromdictargs(object):\n def test_basic_behavior(self, _sys_snapshot):\n from _pytest.config import Config\n\n option_dict = {\"verbose\": 444, \"foo\": \"bar\", \"capture\": \"no\"}\n args = [\"a\", \"b\"]\n\n config = Config.fromdictargs(option_dict, args)\n with pytest.raises(AssertionError):\n config.parse([\"should refuse to parse again\"])\n assert config.option.verbose == 444\n assert config.option.foo == \"bar\"\n assert config.option.capture == \"no\"\n assert config.args == args\n\n def test_origargs(self, _sys_snapshot):\n \"\"\"Show that fromdictargs can handle args in their \"orig\" format\"\"\"\n from _pytest.config import Config\n\n option_dict = {}\n args = [\"-vvvv\", \"-s\", \"a\", \"b\"]\n\n config = Config.fromdictargs(option_dict, args)\n assert config.args == [\"a\", \"b\"]\n assert config._origargs == args\n assert config.option.verbose == 4\n assert config.option.capture == \"no\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs.test_inifilename_TestConfigFromdictargs.test_inifilename.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestConfigFromdictargs.test_inifilename_TestConfigFromdictargs.test_inifilename.None_5", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 466, "end_line": 501, "span_ids": ["TestConfigFromdictargs.test_inifilename"], "tokens": 257}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigFromdictargs(object):\n\n def test_inifilename(self, tmpdir):\n tmpdir.join(\"foo/bar.ini\").ensure().write(\n textwrap.dedent(\n \"\"\"\\\n [pytest]\n name = value\n \"\"\"\n )\n )\n\n from _pytest.config import Config\n\n inifile = \"../../foo/bar.ini\"\n option_dict = {\"inifilename\": inifile, \"capture\": \"no\"}\n\n cwd = tmpdir.join(\"a/b\")\n cwd.join(\"pytest.ini\").ensure().write(\n textwrap.dedent(\n \"\"\"\\\n [pytest]\n name = wrong-value\n should_not_be_set = true\n \"\"\"\n )\n )\n with cwd.ensure(dir=True).as_cwd():\n config = Config.fromdictargs(option_dict, ())\n\n assert config.args == [str(cwd)]\n assert config.option.inifilename == inifile\n assert config.option.capture == \"no\"\n\n # this indicates this is the file used for getting configuration values\n assert config.inifile == inifile\n assert config.inicfg.get(\"name\") == \"value\"\n assert config.inicfg.get(\"should_not_be_set\") is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_options_on_small_file_do_not_blow_up_test_options_on_small_file_do_not_blow_up.for_opts_in_.runfiletest_opts_path_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_options_on_small_file_do_not_blow_up_test_options_on_small_file_do_not_blow_up.for_opts_in_.runfiletest_opts_path_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 504, "end_line": 530, "span_ids": ["test_options_on_small_file_do_not_blow_up"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_options_on_small_file_do_not_blow_up(testdir):\n def runfiletest(opts):\n reprec = testdir.inline_run(*opts)\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == 2\n assert skipped == passed == 0\n\n path = testdir.makepyfile(\n \"\"\"\n def test_f1(): assert 0\n def test_f2(): assert 0\n \"\"\"\n )\n\n for opts in (\n [],\n [\"-l\"],\n [\"-s\"],\n [\"--tb=no\"],\n [\"--tb=short\"],\n [\"--tb=long\"],\n [\"--fulltrace\"],\n [\"--traceconfig\"],\n [\"-v\"],\n [\"-v\", \"-v\"],\n ):\n runfiletest(opts + [path])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_preparse_ordering_with_setuptools_test_preparse_ordering_with_setuptools.assert_plugin_x_42": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_preparse_ordering_with_setuptools_test_preparse_ordering_with_setuptools.assert_plugin_x_42", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 533, "end_line": 568, "span_ids": 
["test_preparse_ordering_with_setuptools", "test_preparse_ordering_with_setuptools.my_iter.EntryPoint.load.PseudoPlugin:2", "test_preparse_ordering_with_setuptools.my_iter.EntryPoint", "test_preparse_ordering_with_setuptools.my_iter.Dist:2", "test_preparse_ordering_with_setuptools.my_iter.Dist", "test_preparse_ordering_with_setuptools.my_iter.EntryPoint:2", "test_preparse_ordering_with_setuptools.my_iter.EntryPoint.load.PseudoPlugin"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_preparse_ordering_with_setuptools(testdir, monkeypatch):\n pkg_resources = pytest.importorskip(\"pkg_resources\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n\n def my_iter(group, name=None):\n assert group == \"pytest11\"\n\n class Dist(object):\n project_name = \"spam\"\n version = \"1.0\"\n\n def _get_metadata(self, name):\n return [\"foo.txt,sha256=abc,123\"]\n\n class EntryPoint(object):\n name = \"mytestplugin\"\n dist = Dist()\n\n def load(self):\n class PseudoPlugin(object):\n x = 42\n\n return PseudoPlugin()\n\n return iter([EntryPoint()])\n\n monkeypatch.setattr(pkg_resources, \"iter_entry_points\", my_iter)\n testdir.makeconftest(\n \"\"\"\n pytest_plugins = \"mytestplugin\",\n \"\"\"\n )\n monkeypatch.setenv(\"PYTEST_PLUGINS\", \"mytestplugin\")\n config = testdir.parseconfig()\n plugin = config.pluginmanager.getplugin(\"mytestplugin\")\n assert plugin.x == 42", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_setuptools_importerror_issue1479_test_setuptools_importerror_issue1479.with_pytest_raises_Import.testdir_parseconfig_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_setuptools_importerror_issue1479_test_setuptools_importerror_issue1479.with_pytest_raises_Import.testdir_parseconfig_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 571, "end_line": 596, "span_ids": ["test_setuptools_importerror_issue1479.my_iter.Dist:2", "test_setuptools_importerror_issue1479.my_iter.EntryPoint", "test_setuptools_importerror_issue1479.my_iter.EntryPoint:2", "test_setuptools_importerror_issue1479", "test_setuptools_importerror_issue1479.my_iter.Dist"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setuptools_importerror_issue1479(testdir, monkeypatch):\n pkg_resources = pytest.importorskip(\"pkg_resources\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n\n def my_iter(group, name=None):\n assert group == \"pytest11\"\n\n class Dist(object):\n project_name = \"spam\"\n version = \"1.0\"\n\n def _get_metadata(self, name):\n return [\"foo.txt,sha256=abc,123\"]\n\n class EntryPoint(object):\n name 
= \"mytestplugin\"\n dist = Dist()\n\n def load(self):\n raise ImportError(\"Don't hide me!\")\n\n return iter([EntryPoint()])\n\n monkeypatch.setattr(pkg_resources, \"iter_entry_points\", my_iter)\n with pytest.raises(ImportError):\n testdir.parseconfig()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_plugin_preparse_prevents_setuptools_loading_test_plugin_preparse_prevents_setuptools_loading.if_block_it_.else_.assert_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_plugin_preparse_prevents_setuptools_loading_test_plugin_preparse_prevents_setuptools_loading.if_block_it_.else_.assert_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 635, "span_ids": ["test_plugin_preparse_prevents_setuptools_loading.my_iter.Dist:2", "test_plugin_preparse_prevents_setuptools_loading.my_iter.EntryPoint:2", "test_plugin_preparse_prevents_setuptools_loading", "test_plugin_preparse_prevents_setuptools_loading.my_iter.EntryPoint", "test_plugin_preparse_prevents_setuptools_loading.my_iter.Dist"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"block_it\", [True, False])\ndef test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch, block_it):\n pkg_resources = pytest.importorskip(\"pkg_resources\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n\n plugin_module_placeholder = object()\n\n def my_iter(group, name=None):\n assert group == \"pytest11\"\n\n class Dist(object):\n project_name = \"spam\"\n version = \"1.0\"\n\n def _get_metadata(self, name):\n return [\"foo.txt,sha256=abc,123\"]\n\n class EntryPoint(object):\n name = \"mytestplugin\"\n dist = Dist()\n\n def load(self):\n return plugin_module_placeholder\n\n return iter([EntryPoint()])\n\n monkeypatch.setattr(pkg_resources, \"iter_entry_points\", my_iter)\n args = (\"-p\", \"no:mytestplugin\") if block_it else ()\n config = testdir.parseconfig(*args)\n config.pluginmanager.import_plugin(\"mytestplugin\")\n if block_it:\n assert \"mytestplugin\" not in sys.modules\n assert config.pluginmanager.get_plugin(\"mytestplugin\") is None\n else:\n assert (\n config.pluginmanager.get_plugin(\"mytestplugin\") is plugin_module_placeholder\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_disable_plugin_autoload_test_disable_plugin_autoload.assert_has_loaded_shou": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_disable_plugin_autoload_test_disable_plugin_autoload.assert_has_loaded_shou", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 638, "end_line": 676, "span_ids": 
["test_disable_plugin_autoload", "test_disable_plugin_autoload.PseudoPlugin:2", "test_disable_plugin_autoload.PseudoPlugin", "test_disable_plugin_autoload.DummyEntryPoint", "test_disable_plugin_autoload.DummyEntryPoint:2"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"parse_args,should_load\", [((\"-p\", \"mytestplugin\"), True), ((), False)]\n)\ndef test_disable_plugin_autoload(testdir, monkeypatch, parse_args, should_load):\n pkg_resources = pytest.importorskip(\"pkg_resources\")\n\n def my_iter(group, name=None):\n assert group == \"pytest11\"\n assert name == \"mytestplugin\"\n return iter([DummyEntryPoint()])\n\n @attr.s\n class DummyEntryPoint(object):\n name = \"mytestplugin\"\n version = \"1.0\"\n\n @property\n def project_name(self):\n return self.name\n\n def load(self):\n return sys.modules[self.name]\n\n @property\n def dist(self):\n return self\n\n def _get_metadata(self, *args):\n return []\n\n class PseudoPlugin(object):\n x = 42\n\n monkeypatch.setenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", \"1\")\n monkeypatch.setattr(pkg_resources, \"iter_entry_points\", my_iter)\n monkeypatch.setitem(sys.modules, \"mytestplugin\", PseudoPlugin())\n config = testdir.parseconfig(*parse_args)\n has_loaded = config.pluginmanager.get_plugin(\"mytestplugin\") is not None\n assert has_loaded == should_load", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_cmdline_processargs_simple_test_invalid_options_show_extra_information.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_cmdline_processargs_simple_test_invalid_options_show_extra_information.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 679, "end_line": 706, "span_ids": ["test_invalid_options_show_extra_information", "test_cmdline_processargs_simple"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_cmdline_processargs_simple(testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_cmdline_preparse(args):\n args.append(\"-h\")\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*pytest*\", \"*-h*\"])\n\n\ndef test_invalid_options_show_extra_information(testdir):\n \"\"\"display extra information when pytest exits due to unrecognized\n options in the command-line\"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n addopts = --invalid-option\n \"\"\"\n )\n result = testdir.runpytest()\n result.stderr.fnmatch_lines(\n [\n \"*error: unrecognized arguments: --invalid-option*\",\n \"* inifile: %s*\" % testdir.tmpdir.join(\"tox.ini\"),\n \"* rootdir: %s*\" % testdir.tmpdir,\n ]\n )", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_consider_args_after_options_for_rootdir_test_consider_args_after_options_for_rootdir.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_consider_args_after_options_for_rootdir_test_consider_args_after_options_for_rootdir.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 709, "end_line": 734, "span_ids": ["test_consider_args_after_options_for_rootdir"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"args\",\n [\n [\"dir1\", \"dir2\", \"-v\"],\n [\"dir1\", \"-v\", \"dir2\"],\n [\"dir2\", \"-v\", \"dir1\"],\n [\"-v\", \"dir2\", \"dir1\"],\n ],\n)\ndef test_consider_args_after_options_for_rootdir(testdir, args):\n \"\"\"\n Consider all arguments in the command-line for rootdir\n discovery, even if they happen to occur after an option. #949\n \"\"\"\n # replace \"dir1\" and \"dir2\" from \"args\" into their real directory\n root = testdir.tmpdir.mkdir(\"myroot\")\n d1 = root.mkdir(\"dir1\")\n d2 = root.mkdir(\"dir2\")\n for i, arg in enumerate(args):\n if arg == \"dir1\":\n args[i] = d1\n elif arg == \"dir2\":\n args[i] = d2\n with root.as_cwd():\n result = testdir.runpytest(*args)\n result.stdout.fnmatch_lines([\"*rootdir: *myroot\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_toolongargs_issue224_test_config_in_subdirectory_colon_command_line_issue2148.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_toolongargs_issue224_test_config_in_subdirectory_colon_command_line_issue2148.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 737, "end_line": 766, "span_ids": ["test_toolongargs_issue224", "test_config_in_subdirectory_colon_command_line_issue2148"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\"sys.platform == 'win32'\")\ndef test_toolongargs_issue224(testdir):\n result = testdir.runpytest(\"-m\", \"hello\" * 500)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n\n\ndef test_config_in_subdirectory_colon_command_line_issue2148(testdir):\n conftest_source = \"\"\"\n def pytest_addoption(parser):\n parser.addini('foo', 'foo')\n \"\"\"\n\n testdir.makefile(\n \".ini\",\n **{\"pytest\": \"[pytest]\\nfoo = 
root\", \"subdir/pytest\": \"[pytest]\\nfoo = subdir\"}\n )\n\n testdir.makepyfile(\n **{\n \"conftest\": conftest_source,\n \"subdir/conftest\": conftest_source,\n \"subdir/test_foo\": \"\"\"\n def test_foo(pytestconfig):\n assert pytestconfig.getini('foo') == 'subdir'\n \"\"\",\n }\n )\n\n result = testdir.runpytest(\"subdir/test_foo.py::test_foo\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_notify_exception_test_notify_exception.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_notify_exception_test_notify_exception.None_5", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 769, "end_line": 791, "span_ids": ["test_notify_exception.A", "test_notify_exception.A.pytest_internalerror", "test_notify_exception"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_notify_exception(testdir, capfd):\n config = testdir.parseconfig()\n with pytest.raises(ValueError) as excinfo:\n raise ValueError(1)\n config.notify_exception(excinfo, config.option)\n out, err = capfd.readouterr()\n assert \"ValueError\" in err\n\n class A(object):\n def pytest_internalerror(self, excrepr):\n return True\n\n config.pluginmanager.register(A())\n config.notify_exception(excinfo, config.option)\n out, err = capfd.readouterr()\n assert not err\n\n config = testdir.parseconfig(\"-p\", \"no:terminal\")\n with pytest.raises(ValueError) as excinfo:\n raise ValueError(1)\n config.notify_exception(excinfo, config.option)\n out, err = capfd.readouterr()\n assert \"ValueError\" in err", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_load_initial_conftest_last_ordering_test_get_plugin_specs_as_list.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_load_initial_conftest_last_ordering_test_get_plugin_specs_as_list.None_5", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 794, "end_line": 822, "span_ids": ["test_load_initial_conftest_last_ordering.My", "test_get_plugin_specs_as_list", "test_load_initial_conftest_last_ordering", "test_load_initial_conftest_last_ordering.My.pytest_load_initial_conftests"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_load_initial_conftest_last_ordering(testdir, _config_for_test):\n pm = _config_for_test.pluginmanager\n\n class My(object):\n 
def pytest_load_initial_conftests(self):\n pass\n\n m = My()\n pm.register(m)\n hc = pm.hook.pytest_load_initial_conftests\n values = hc._nonwrappers + hc._wrappers\n expected = [\"_pytest.config\", \"test_config\", \"_pytest.capture\"]\n assert [x.function.__module__ for x in values] == expected\n\n\ndef test_get_plugin_specs_as_list():\n from _pytest.config import _get_plugin_specs_as_list\n\n with pytest.raises(pytest.UsageError):\n _get_plugin_specs_as_list({\"foo\"})\n with pytest.raises(pytest.UsageError):\n _get_plugin_specs_as_list(dict())\n\n assert _get_plugin_specs_as_list(None) == []\n assert _get_plugin_specs_as_list(\"\") == []\n assert _get_plugin_specs_as_list(\"foo\") == [\"foo\"]\n assert _get_plugin_specs_as_list(\"foo,bar\") == [\"foo\", \"bar\"]\n assert _get_plugin_specs_as_list([\"foo\", \"bar\"]) == [\"foo\", \"bar\"]\n assert _get_plugin_specs_as_list((\"foo\", \"bar\")) == [\"foo\", \"bar\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_collect_pytest_prefix_bug_integration_test_collect_pytest_prefix_bug.assert_pm_parse_hookimpl_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_collect_pytest_prefix_bug_integration_test_collect_pytest_prefix_bug.assert_pm_parse_hookimpl_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 825, "end_line": 840, "span_ids": ["test_collect_pytest_prefix_bug.Dummy", "test_collect_pytest_prefix_bug.Dummy.pytest_something:2", "test_collect_pytest_prefix_bug", "test_collect_pytest_prefix_bug_integration"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_collect_pytest_prefix_bug_integration(testdir):\n \"\"\"Integration test for issue #3775\"\"\"\n p = testdir.copy_example(\"config/collect_pytest_prefix\")\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"* 1 passed *\"])\n\n\ndef test_collect_pytest_prefix_bug(pytestconfig):\n \"\"\"Ensure we collect only actual functions from conftest files (#3775)\"\"\"\n\n class Dummy(object):\n class pytest_something(object):\n pass\n\n pm = pytestconfig.pluginmanager\n assert pm.parse_hookimpl_opts(Dummy(), \"pytest_something\") is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir_TestRootdir.test_simple_noini.with_tmpdir_as_cwd_.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir_TestRootdir.test_simple_noini.with_tmpdir_as_cwd_.None_2", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 843, "end_line": 853, "span_ids": ["TestRootdir.test_simple_noini", "TestRootdir"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", 
"file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRootdir(object):\n def test_simple_noini(self, tmpdir):\n assert get_common_ancestor([tmpdir]) == tmpdir\n a = tmpdir.mkdir(\"a\")\n assert get_common_ancestor([a, tmpdir]) == tmpdir\n assert get_common_ancestor([tmpdir, a]) == tmpdir\n with tmpdir.as_cwd():\n assert get_common_ancestor([]) == tmpdir\n no_path = tmpdir.join(\"does-not-exist\")\n assert get_common_ancestor([no_path]) == tmpdir\n assert get_common_ancestor([no_path.join(\"a\")]) == tmpdir", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_with_ini_TestRootdir.test_with_ini.assert_inifile_inifile": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_with_ini_TestRootdir.test_with_ini.assert_inifile_inifile", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 855, "end_line": 868, "span_ids": ["TestRootdir.test_with_ini"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRootdir(object):\n\n @pytest.mark.parametrize(\"name\", \"setup.cfg tox.ini pytest.ini\".split())\n def test_with_ini(self, tmpdir, name):\n inifile = tmpdir.join(name)\n inifile.write(\"[pytest]\\n\" if name != \"setup.cfg\" else \"[tool:pytest]\\n\")\n\n a = tmpdir.mkdir(\"a\")\n b = a.mkdir(\"b\")\n for args in ([tmpdir], [a], [b]):\n rootdir, inifile, inicfg = determine_setup(None, args)\n assert rootdir == tmpdir\n assert inifile == inifile\n rootdir, inifile, inicfg = determine_setup(None, [b, a])\n assert rootdir == tmpdir\n assert inifile == inifile", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_pytestini_overrides_empty_other_TestRootdir.test_with_specific_inifile.assert_rootdir_tmpdir": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestRootdir.test_pytestini_overrides_empty_other_TestRootdir.test_with_specific_inifile.assert_rootdir_tmpdir", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 870, "end_line": 898, "span_ids": ["TestRootdir.test_setuppy_fallback", "TestRootdir.test_pytestini_overrides_empty_other", "TestRootdir.test_nothing", "TestRootdir.test_with_specific_inifile"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestRootdir(object):\n\n @pytest.mark.parametrize(\"name\", \"setup.cfg tox.ini\".split())\n def test_pytestini_overrides_empty_other(self, tmpdir, name):\n inifile = tmpdir.ensure(\"pytest.ini\")\n a = tmpdir.mkdir(\"a\")\n a.ensure(name)\n rootdir, inifile, inicfg = determine_setup(None, [a])\n assert rootdir == tmpdir\n assert inifile == inifile\n\n def test_setuppy_fallback(self, tmpdir):\n a = tmpdir.mkdir(\"a\")\n a.ensure(\"setup.cfg\")\n tmpdir.ensure(\"setup.py\")\n rootdir, inifile, inicfg = determine_setup(None, [a])\n assert rootdir == tmpdir\n assert inifile is None\n assert inicfg == {}\n\n def test_nothing(self, tmpdir, monkeypatch):\n monkeypatch.chdir(str(tmpdir))\n rootdir, inifile, inicfg = determine_setup(None, [tmpdir])\n assert rootdir == tmpdir\n assert inifile is None\n assert inicfg == {}\n\n def test_with_specific_inifile(self, tmpdir):\n inifile = tmpdir.ensure(\"pytest.ini\")\n rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])\n assert rootdir == tmpdir", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs_TestOverrideIniArgs.test_override_ini_names.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs_TestOverrideIniArgs.test_override_ini_names.None_4", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 901, "end_line": 934, "span_ids": ["TestOverrideIniArgs.test_override_ini_names", "TestOverrideIniArgs"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n @pytest.mark.parametrize(\"name\", \"setup.cfg tox.ini pytest.ini\".split())\n def test_override_ini_names(self, testdir, name):\n section = \"[pytest]\" if name != \"setup.cfg\" else \"[tool:pytest]\"\n testdir.tmpdir.join(name).write(\n textwrap.dedent(\n \"\"\"\n {section}\n custom = 1.0\"\"\".format(\n section=section\n )\n )\n )\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"custom\", \"\")\"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_pass(pytestconfig):\n ini_val = pytestconfig.getini(\"custom\")\n print('\\\\ncustom_option:%s\\\\n' % ini_val)\"\"\"\n )\n\n result = testdir.runpytest(\"--override-ini\", \"custom=2.0\", \"-s\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"custom_option:2.0\"])\n\n result = testdir.runpytest(\n \"--override-ini\", \"custom=2.0\", \"--override-ini=custom=3.0\", \"-s\"\n )\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"custom_option:3.0\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_pathlist_TestOverrideIniArgs.test_override_ini_pathlist.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_pathlist_TestOverrideIniArgs.test_override_ini_pathlist.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 936, "end_line": 959, "span_ids": ["TestOverrideIniArgs.test_override_ini_pathlist"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_override_ini_pathlist(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n parser.addini(\"paths\", \"my new ini value\", type=\"pathlist\")\"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n paths=blah.py\"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import py.path\n def test_pathlist(pytestconfig):\n config_paths = pytestconfig.getini(\"paths\")\n print(config_paths)\n for cpf in config_paths:\n print('\\\\nuser_path:%s' % cpf.basename)\"\"\"\n )\n result = testdir.runpytest(\n \"--override-ini\", \"paths=foo/bar1.py foo/bar2.py\", \"-s\"\n )\n result.stdout.fnmatch_lines([\"user_path:bar1.py\", \"user_path:bar2.py\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_multiple_and_default_TestOverrideIniArgs.test_override_multiple_and_default.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_multiple_and_default_TestOverrideIniArgs.test_override_multiple_and_default.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 961, "end_line": 1005, "span_ids": ["TestOverrideIniArgs.test_override_multiple_and_default"], "tokens": 359}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_override_multiple_and_default(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_addoption(parser):\n addini = parser.addini\n addini(\"custom_option_1\", \"\", default=\"o1\")\n addini(\"custom_option_2\", \"\", default=\"o2\")\n addini(\"custom_option_3\", \"\", default=False, type=\"bool\")\n addini(\"custom_option_4\", \"\", default=True, type=\"bool\")\"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n custom_option_1=custom_option_1\n custom_option_2=custom_option_2\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_multiple_options(pytestconfig):\n prefix = 
\"custom_option\"\n for x in range(1, 5):\n ini_value=pytestconfig.getini(\"%s_%d\" % (prefix, x))\n print('\\\\nini%d:%s' % (x, ini_value))\n \"\"\"\n )\n result = testdir.runpytest(\n \"--override-ini\",\n \"custom_option_1=fulldir=/tmp/user1\",\n \"-o\",\n \"custom_option_2=url=/tmp/user2?a=b&d=e\",\n \"-o\",\n \"custom_option_3=True\",\n \"-o\",\n \"custom_option_4=no\",\n \"-s\",\n )\n result.stdout.fnmatch_lines(\n [\n \"ini1:fulldir=/tmp/user1\",\n \"ini2:url=/tmp/user2?a=b&d=e\",\n \"ini3:True\",\n \"ini4:False\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_usage_error_bad_style_TestOverrideIniArgs.test_override_ini_handled_asap.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_override_ini_usage_error_bad_style_TestOverrideIniArgs.test_override_ini_handled_asap.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1007, "end_line": 1034, "span_ids": ["TestOverrideIniArgs.test_override_ini_usage_error_bad_style", "TestOverrideIniArgs.test_override_ini_handled_asap"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_override_ini_usage_error_bad_style(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n xdist_strict=False\n \"\"\"\n )\n result = testdir.runpytest(\"--override-ini\", \"xdist_strict True\", \"-s\")\n result.stderr.fnmatch_lines([\"*ERROR* *expects option=value*\"])\n\n @pytest.mark.parametrize(\"with_ini\", [True, False])\n def test_override_ini_handled_asap(self, testdir, with_ini):\n \"\"\"-o should be handled as soon as possible and always override what's in ini files (#2238)\"\"\"\n if with_ini:\n testdir.makeini(\n \"\"\"\n [pytest]\n python_files=test_*.py\n \"\"\"\n )\n testdir.makepyfile(\n unittest_ini_handle=\"\"\"\n def test():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--override-ini\", \"python_files=unittest_*.py\")\n result.stdout.fnmatch_lines([\"*1 passed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_with_arg_outside_cwd_without_inifile_TestOverrideIniArgs.test_addopts_before_initini.assert_config__override_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_with_arg_outside_cwd_without_inifile_TestOverrideIniArgs.test_addopts_before_initini.assert_config__override_i", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1036, "end_line": 1072, "span_ids": ["TestOverrideIniArgs.test_with_existing_file_in_subdir", 
"TestOverrideIniArgs.test_with_arg_outside_cwd_with_inifile", "TestOverrideIniArgs.test_addopts_before_initini", "TestOverrideIniArgs.test_with_arg_outside_cwd_without_inifile", "TestOverrideIniArgs.test_with_non_dir_arg"], "tokens": 424}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_with_arg_outside_cwd_without_inifile(self, tmpdir, monkeypatch):\n monkeypatch.chdir(str(tmpdir))\n a = tmpdir.mkdir(\"a\")\n b = tmpdir.mkdir(\"b\")\n rootdir, inifile, inicfg = determine_setup(None, [a, b])\n assert rootdir == tmpdir\n assert inifile is None\n\n def test_with_arg_outside_cwd_with_inifile(self, tmpdir):\n a = tmpdir.mkdir(\"a\")\n b = tmpdir.mkdir(\"b\")\n inifile = a.ensure(\"pytest.ini\")\n rootdir, parsed_inifile, inicfg = determine_setup(None, [a, b])\n assert rootdir == a\n assert inifile == parsed_inifile\n\n @pytest.mark.parametrize(\"dirs\", ([], [\"does-not-exist\"], [\"a/does-not-exist\"]))\n def test_with_non_dir_arg(self, dirs, tmpdir):\n with tmpdir.ensure(dir=True).as_cwd():\n rootdir, inifile, inicfg = determine_setup(None, dirs)\n assert rootdir == tmpdir\n assert inifile is None\n\n def test_with_existing_file_in_subdir(self, tmpdir):\n a = tmpdir.mkdir(\"a\")\n a.ensure(\"exist\")\n with tmpdir.as_cwd():\n rootdir, inifile, inicfg = determine_setup(None, [\"a/exist\"])\n assert rootdir == tmpdir\n assert inifile is None\n\n def test_addopts_before_initini(self, monkeypatch, _config_for_test, _sys_snapshot):\n cache_dir = \".custom_cache\"\n monkeypatch.setenv(\"PYTEST_ADDOPTS\", \"-o cache_dir=%s\" % cache_dir)\n config = _config_for_test\n config._preparse([], addopts=True)\n assert config._override_ini == [\"cache_dir=%s\" % cache_dir]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_env_not_concatenated_TestOverrideIniArgs.test_addopts_from_env_not_concatenated.assert_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_env_not_concatenated_TestOverrideIniArgs.test_addopts_from_env_not_concatenated.assert_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1074, "end_line": 1083, "span_ids": ["TestOverrideIniArgs.test_addopts_from_env_not_concatenated"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_addopts_from_env_not_concatenated(self, monkeypatch, _config_for_test):\n \"\"\"PYTEST_ADDOPTS should not take values from normal args (#4265).\"\"\"\n monkeypatch.setenv(\"PYTEST_ADDOPTS\", \"-o\")\n config = _config_for_test\n with pytest.raises(UsageError) as excinfo:\n 
config._preparse([\"cache_dir=ignored\"], addopts=True)\n assert (\n \"error: argument -o/--override-ini: expected one argument (via PYTEST_ADDOPTS)\"\n in excinfo.value.args[0]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_ini_not_concatenated_TestOverrideIniArgs.test_override_ini_does_not_contain_paths.assert_config__override_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_addopts_from_ini_not_concatenated_TestOverrideIniArgs.test_override_ini_does_not_contain_paths.assert_config__override_i", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1085, "end_line": 1106, "span_ids": ["TestOverrideIniArgs.test_addopts_from_ini_not_concatenated", "TestOverrideIniArgs.test_override_ini_does_not_contain_paths"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n def test_addopts_from_ini_not_concatenated(self, testdir):\n \"\"\"addopts from ini should not take values from normal args (#4265).\"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n addopts=-o\n \"\"\"\n )\n result = testdir.runpytest(\"cache_dir=ignored\")\n result.stderr.fnmatch_lines(\n [\n \"%s: error: argument -o/--override-ini: expected one argument (via addopts config)\"\n % (testdir.request.config._parser.optparser.prog,)\n ]\n )\n assert result.ret == _pytest.main.EXIT_USAGEERROR\n\n def test_override_ini_does_not_contain_paths(self, _config_for_test, _sys_snapshot):\n \"\"\"Check that -o no longer swallows all options after it (#3103)\"\"\"\n config = _config_for_test\n config._preparse([\"-o\", \"cache_dir=/cache\", \"/some/test/path\"])\n assert config._override_ini == [\"cache_dir=/cache\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_multiple_override_ini_options_test_help_via_addopts.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_TestOverrideIniArgs.test_multiple_override_ini_options_test_help_via_addopts.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1108, "end_line": 1149, "span_ids": ["test_help_via_addopts", "TestOverrideIniArgs.test_multiple_override_ini_options"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestOverrideIniArgs(object):\n\n 
def test_multiple_override_ini_options(self, testdir, request):\n \"\"\"Ensure a file path following a '-o' option does not generate an error (#3103)\"\"\"\n testdir.makepyfile(\n **{\n \"conftest.py\": \"\"\"\n def pytest_addoption(parser):\n parser.addini('foo', default=None, help='some option')\n parser.addini('bar', default=None, help='some option')\n \"\"\",\n \"test_foo.py\": \"\"\"\n def test(pytestconfig):\n assert pytestconfig.getini('foo') == '1'\n assert pytestconfig.getini('bar') == '0'\n \"\"\",\n \"test_bar.py\": \"\"\"\n def test():\n assert False\n \"\"\",\n }\n )\n result = testdir.runpytest(\"-o\", \"foo=1\", \"-o\", \"bar=0\", \"test_foo.py\")\n assert \"ERROR:\" not in result.stderr.str()\n result.stdout.fnmatch_lines([\"collected 1 item\", \"*= 1 passed in *=\"])\n\n\ndef test_help_via_addopts(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n addopts = --unknown-option-should-allow-for-help --help\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\n \"usage: *\",\n \"positional arguments:\",\n # Displays full/default help.\n \"to see available markers type: pytest --markers\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_help_and_version_after_argument_error_test_help_and_version_after_argument_error.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_help_and_version_after_argument_error_test_help_and_version_after_argument_error.None_2", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1152, "end_line": 1195, "span_ids": ["test_help_and_version_after_argument_error"], "tokens": 295}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_help_and_version_after_argument_error(testdir):\n testdir.makeconftest(\n \"\"\"\n def validate(arg):\n raise argparse.ArgumentTypeError(\"argerror\")\n\n def pytest_addoption(parser):\n group = parser.getgroup('cov')\n group.addoption(\n \"--invalid-option-should-allow-for-help\",\n type=validate,\n )\n \"\"\"\n )\n testdir.makeini(\n \"\"\"\n [pytest]\n addopts = --invalid-option-should-allow-for-help\n \"\"\"\n )\n result = testdir.runpytest(\"--help\")\n result.stdout.fnmatch_lines(\n [\n \"usage: *\",\n \"positional arguments:\",\n \"NOTE: displaying only minimal help due to UsageError.\",\n ]\n )\n result.stderr.fnmatch_lines(\n [\n \"ERROR: usage: *\",\n \"%s: error: argument --invalid-option-should-allow-for-help: expected one argument\"\n % (testdir.request.config._parser.optparser.prog,),\n ]\n )\n # Does not display full/default help.\n assert \"to see available markers type: pytest --markers\" not in result.stdout.lines\n assert result.ret == EXIT_USAGEERROR\n\n result = testdir.runpytest(\"--version\")\n result.stderr.fnmatch_lines(\n [\"*pytest*{}*imported from*\".format(pytest.__version__)]\n )\n assert result.ret == EXIT_USAGEERROR", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_does_not_load_blocked_plugin_from_args_test_config_does_not_load_blocked_plugin_from_args.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_does_not_load_blocked_plugin_from_args_test_config_does_not_load_blocked_plugin_from_args.None_1", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1198, "end_line": 1207, "span_ids": ["test_config_does_not_load_blocked_plugin_from_args"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_config_does_not_load_blocked_plugin_from_args(testdir):\n \"\"\"This tests that pytest's config setup handles \"-p no:X\".\"\"\"\n p = testdir.makepyfile(\"def test(capfd): pass\")\n result = testdir.runpytest(str(p), \"-pno:capture\")\n result.stdout.fnmatch_lines([\"E fixture 'capfd' not found\"])\n assert result.ret == EXIT_TESTSFAILED\n\n result = testdir.runpytest(str(p), \"-pno:capture\", \"-s\")\n result.stderr.fnmatch_lines([\"*: error: unrecognized arguments: -s\"])\n assert result.ret == EXIT_USAGEERROR", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_blocked_default_plugins_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_config.py_test_config_blocked_default_plugins_", "embedding": null, "metadata": {"file_path": "testing/test_config.py", "file_name": "test_config.py", "file_type": "text/x-python", "category": "test", "start_line": 1210, "end_line": 1242, "span_ids": ["test_config_blocked_default_plugins"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"plugin\",\n [\n x\n for x in _pytest.config.default_plugins\n if x not in _pytest.config.essential_plugins\n ],\n)\ndef test_config_blocked_default_plugins(testdir, plugin):\n if plugin == \"debugging\":\n # Fixed in xdist master (after 1.27.0).\n # https://github.com/pytest-dev/pytest-xdist/pull/422\n try:\n import xdist # noqa: F401\n except ImportError:\n pass\n else:\n pytest.skip(\"does not work with xdist currently\")\n\n p = testdir.makepyfile(\"def test(): pass\")\n result = testdir.runpytest(str(p), \"-pno:%s\" % plugin)\n assert result.ret == EXIT_OK\n if plugin != \"terminal\":\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])\n\n p = testdir.makepyfile(\"def test(): assert 0\")\n result = testdir.runpytest(str(p), \"-pno:%s\" % plugin)\n assert result.ret == EXIT_TESTSFAILED\n if plugin != \"terminal\":\n result.stdout.fnmatch_lines([\"* 1 failed in *\"])\n else:\n assert 
result.stdout.lines == [\"\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_from___future___import_ab_conftest_setinitial.conftest__set_initial_con": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_from___future___import_ab_conftest_setinitial.conftest__set_initial_con", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["conftest_setinitial", "ConftestWithSetinitial", "conftest_setinitial.Namespace", "conftest_setinitial.Namespace.__init__", "imports"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport textwrap\n\nimport py\n\nimport pytest\nfrom _pytest.config import PytestPluginManager\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import EXIT_OK\nfrom _pytest.main import EXIT_USAGEERROR\n\n\ndef ConftestWithSetinitial(path):\n conftest = PytestPluginManager()\n conftest_setinitial(conftest, [path])\n return conftest\n\n\ndef conftest_setinitial(conftest, args, confcutdir=None):\n class Namespace(object):\n def __init__(self):\n self.file_or_dir = args\n self.confcutdir = str(confcutdir)\n self.noconftest = False\n self.pyargs = False\n\n conftest._set_initial_conftests(Namespace())", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal_TestConftestValueAccessGlobal.test_basic_init.assert_conftest__rget_wit": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal_TestConftestValueAccessGlobal.test_basic_init.assert_conftest__rget_wit", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 49, "span_ids": ["TestConftestValueAccessGlobal.basedir", "TestConftestValueAccessGlobal.test_basic_init", "TestConftestValueAccessGlobal"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"_sys_snapshot\")\nclass TestConftestValueAccessGlobal(object):\n @pytest.fixture(scope=\"module\", params=[\"global\", \"inpackage\"])\n def basedir(self, request, tmpdir_factory):\n tmpdir = tmpdir_factory.mktemp(\"basedir\", numbered=True)\n tmpdir.ensure(\"adir/conftest.py\").write(\"a=1 ; Directory = 3\")\n tmpdir.ensure(\"adir/b/conftest.py\").write(\"b=2 ; a = 
1.5\")\n if request.param == \"inpackage\":\n tmpdir.ensure(\"adir/__init__.py\")\n tmpdir.ensure(\"adir/b/__init__.py\")\n\n yield tmpdir\n\n def test_basic_init(self, basedir):\n conftest = PytestPluginManager()\n p = basedir.join(\"adir\")\n assert conftest._rget_with_confmod(\"a\", p)[1] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same_TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same.None_3", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 60, "span_ids": ["TestConftestValueAccessGlobal.test_immediate_initialiation_and_incremental_are_the_same"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"_sys_snapshot\")\nclass TestConftestValueAccessGlobal(object):\n\n def test_immediate_initialiation_and_incremental_are_the_same(self, basedir):\n conftest = PytestPluginManager()\n assert not len(conftest._dirpath2confmods)\n conftest._getconftestmodules(basedir)\n snap1 = len(conftest._dirpath2confmods)\n assert snap1 == 1\n conftest._getconftestmodules(basedir.join(\"adir\"))\n assert len(conftest._dirpath2confmods) == snap1 + 1\n conftest._getconftestmodules(basedir.join(\"b\"))\n assert len(conftest._dirpath2confmods) == snap1 + 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_not_existing_TestConftestValueAccessGlobal.test_value_access_by_path.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_not_existing_TestConftestValueAccessGlobal.test_value_access_by_path.None_1", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 62, "end_line": 71, "span_ids": ["TestConftestValueAccessGlobal.test_value_access_by_path", "TestConftestValueAccessGlobal.test_value_access_not_existing"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"_sys_snapshot\")\nclass 
TestConftestValueAccessGlobal(object):\n\n def test_value_access_not_existing(self, basedir):\n conftest = ConftestWithSetinitial(basedir)\n with pytest.raises(KeyError):\n conftest._rget_with_confmod(\"a\", basedir)\n\n def test_value_access_by_path(self, basedir):\n conftest = ConftestWithSetinitial(basedir)\n adir = basedir.join(\"adir\")\n assert conftest._rget_with_confmod(\"a\", adir)[1] == 1\n assert conftest._rget_with_confmod(\"a\", adir.join(\"b\"))[1] == 1.5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_with_confmod_TestConftestValueAccessGlobal.test_value_access_with_confmod.assert_path_purebasename_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestValueAccessGlobal.test_value_access_with_confmod_TestConftestValueAccessGlobal.test_value_access_with_confmod.assert_path_purebasename_", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 73, "end_line": 81, "span_ids": ["TestConftestValueAccessGlobal.test_value_access_with_confmod"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"_sys_snapshot\")\nclass TestConftestValueAccessGlobal(object):\n\n def test_value_access_with_confmod(self, basedir):\n startdir = basedir.join(\"adir\", \"b\")\n startdir.ensure(\"xx\", dir=True)\n conftest = ConftestWithSetinitial(startdir)\n mod, value = conftest._rget_with_confmod(\"a\", startdir)\n assert value == 1.5\n path = py.path.local(mod.__file__)\n assert path.dirpath() == basedir.join(\"adir\", \"b\")\n assert path.purebasename.startswith(\"conftest\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_in_nonpkg_with_init_test_conftest_in_nonpkg_with_init.ConftestWithSetinitial_tm": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_in_nonpkg_with_init_test_conftest_in_nonpkg_with_init.ConftestWithSetinitial_tm", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 89, "span_ids": ["test_conftest_in_nonpkg_with_init"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_in_nonpkg_with_init(tmpdir, _sys_snapshot):\n tmpdir.ensure(\"adir-1.0/conftest.py\").write(\"a=1 ; Directory = 3\")\n tmpdir.ensure(\"adir-1.0/b/conftest.py\").write(\"b=2 ; a = 1.5\")\n 
tmpdir.ensure(\"adir-1.0/b/__init__.py\")\n tmpdir.ensure(\"adir-1.0/__init__.py\")\n ConftestWithSetinitial(tmpdir.join(\"adir-1.0\", \"b\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_doubledash_considered_test_issue151_load_all_conftests.assert_len_d_len_name": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_doubledash_considered_test_issue151_load_all_conftests.assert_len_d_len_name", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 92, "end_line": 110, "span_ids": ["test_doubledash_considered", "test_issue151_load_all_conftests"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_doubledash_considered(testdir):\n conf = testdir.mkdir(\"--option\")\n conf.ensure(\"conftest.py\")\n conftest = PytestPluginManager()\n conftest_setinitial(conftest, [conf.basename, conf.basename])\n values = conftest._getconftestmodules(conf)\n assert len(values) == 1\n\n\ndef test_issue151_load_all_conftests(testdir):\n names = \"code proj src\".split()\n for name in names:\n p = testdir.mkdir(name)\n p.ensure(\"conftest.py\")\n\n conftest = PytestPluginManager()\n conftest_setinitial(conftest, names)\n d = list(conftest._conftestpath2mod.values())\n assert len(d) == len(names)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_global_import_test_conftest_global_import.assert_res_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_global_import_test_conftest_global_import.assert_res_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 113, "end_line": 134, "span_ids": ["test_conftest_global_import"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_global_import(testdir):\n testdir.makeconftest(\"x=3\")\n p = testdir.makepyfile(\n \"\"\"\n import py, pytest\n from _pytest.config import PytestPluginManager\n conf = PytestPluginManager()\n mod = conf._importconftest(py.path.local(\"conftest.py\"))\n assert mod.x == 3\n import conftest\n assert conftest is mod, (conftest, mod)\n subconf = py.path.local().ensure(\"sub\", \"conftest.py\")\n subconf.write(\"y=4\")\n mod2 = conf._importconftest(subconf)\n assert mod != mod2\n assert mod2.y == 4\n import conftest\n assert conftest is mod2, (conftest, mod)\n \"\"\"\n )\n res = testdir.runpython(p)\n 
assert res.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_test_conftestcutdir.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_test_conftestcutdir.None_5", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 154, "span_ids": ["test_conftestcutdir"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftestcutdir(testdir):\n conf = testdir.makeconftest(\"\")\n p = testdir.mkdir(\"x\")\n conftest = PytestPluginManager()\n conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)\n values = conftest._getconftestmodules(p)\n assert len(values) == 0\n values = conftest._getconftestmodules(conf.dirpath())\n assert len(values) == 0\n assert conf not in conftest._conftestpath2mod\n # but we can still import a conftest directly\n conftest._importconftest(conf)\n values = conftest._getconftestmodules(conf.dirpath())\n assert values[0].__file__.startswith(str(conf))\n # and all sub paths get updated properly\n values = conftest._getconftestmodules(p)\n assert len(values) == 1\n assert values[0].__file__.startswith(str(conf))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_inplace_considered_test_setinitial_conftest_subdirs.if_name_not_in_whatever.else_.assert_len_conftest__conf": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftestcutdir_inplace_considered_test_setinitial_conftest_subdirs.if_name_not_in_whatever.else_.assert_len_conftest__conf", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 177, "span_ids": ["test_setinitial_conftest_subdirs", "test_conftestcutdir_inplace_considered"], "tokens": 268}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftestcutdir_inplace_considered(testdir):\n conf = testdir.makeconftest(\"\")\n conftest = PytestPluginManager()\n conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())\n values = conftest._getconftestmodules(conf.dirpath())\n assert len(values) == 1\n assert values[0].__file__.startswith(str(conf))\n\n\n@pytest.mark.parametrize(\"name\", \"test tests whatever .dotdir\".split())\ndef test_setinitial_conftest_subdirs(testdir, name):\n sub = testdir.mkdir(name)\n subconftest = sub.ensure(\"conftest.py\")\n conftest = 
PytestPluginManager()\n conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)\n if name not in (\"whatever\", \".dotdir\"):\n assert subconftest in conftest._conftestpath2mod\n assert len(conftest._conftestpath2mod) == 1\n else:\n assert subconftest not in conftest._conftestpath2mod\n assert len(conftest._conftestpath2mod) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_confcutdir_test_conftest_confcutdir.assert_warning_could_no": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_confcutdir_test_conftest_confcutdir.assert_warning_could_no", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 193, "span_ids": ["test_conftest_confcutdir"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_confcutdir(testdir):\n testdir.makeconftest(\"assert 0\")\n x = testdir.mkdir(\"x\")\n x.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", action=\"store_true\")\n \"\"\"\n )\n )\n result = testdir.runpytest(\"-h\", \"--confcutdir=%s\" % x, x)\n result.stdout.fnmatch_lines([\"*--xyz*\"])\n assert \"warning: could not load initial\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_test_conftest_symlink.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_test_conftest_symlink.None_2", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 245, "span_ids": ["test_conftest_symlink"], "tokens": 380}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n)\ndef test_conftest_symlink(testdir):\n \"\"\"Ensure that conftest.py is used for resolved symlinks.\"\"\"\n real = testdir.tmpdir.mkdir(\"real\")\n realtests = real.mkdir(\"app\").mkdir(\"tests\")\n testdir.tmpdir.join(\"symlinktests\").mksymlinkto(realtests)\n testdir.tmpdir.join(\"symlink\").mksymlinkto(real)\n testdir.makepyfile(\n **{\n \"real/app/tests/test_foo.py\": \"def test1(fixture): pass\",\n \"real/conftest.py\": textwrap.dedent(\n \"\"\"\n import pytest\n\n print(\"conftest_loaded\")\n\n @pytest.fixture\n def fixture():\n 
print(\"fixture_used\")\n \"\"\"\n ),\n }\n )\n result = testdir.runpytest(\"-vs\", \"symlinktests\")\n result.stdout.fnmatch_lines(\n [\n \"*conftest_loaded*\",\n \"real/app/tests/test_foo.py::test1 fixture_used\",\n \"PASSED\",\n ]\n )\n assert result.ret == EXIT_OK\n\n # Should not cause \"ValueError: Plugin already registered\" (#4174).\n result = testdir.runpytest(\"-vs\", \"symlink\")\n assert result.ret == EXIT_OK\n\n realtests.ensure(\"__init__.py\")\n result = testdir.runpytest(\"-vs\", \"symlinktests/test_foo.py::test1\")\n result.stdout.fnmatch_lines(\n [\n \"*conftest_loaded*\",\n \"real/app/tests/test_foo.py::test1 fixture_used\",\n \"PASSED\",\n ]\n )\n assert result.ret == EXIT_OK", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_files_test_conftest_symlink_files.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_symlink_files_test_conftest_symlink_files.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 248, "end_line": 281, "span_ids": ["test_conftest_symlink_files"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n not hasattr(py.path.local, \"mksymlinkto\"),\n reason=\"symlink not available on this platform\",\n)\ndef test_conftest_symlink_files(testdir):\n \"\"\"Check conftest.py loading when running in directory with symlinks.\"\"\"\n real = testdir.tmpdir.mkdir(\"real\")\n source = {\n \"app/test_foo.py\": \"def test1(fixture): pass\",\n \"app/__init__.py\": \"\",\n \"app/conftest.py\": textwrap.dedent(\n \"\"\"\n import pytest\n\n print(\"conftest_loaded\")\n\n @pytest.fixture\n def fixture():\n print(\"fixture_used\")\n \"\"\"\n ),\n }\n testdir.makepyfile(**{\"real/%s\" % k: v for k, v in source.items()})\n\n # Create a build directory that contains symlinks to actual files\n # but doesn't symlink actual directories.\n build = testdir.tmpdir.mkdir(\"build\")\n build.mkdir(\"app\")\n for f in source:\n build.join(f).mksymlinkto(real.join(f))\n build.chdir()\n result = testdir.runpytest(\"-vs\", \"app/test_foo.py\")\n result.stdout.fnmatch_lines([\"*conftest_loaded*\", \"PASSED\"])\n assert result.ret == EXIT_OK", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_no_conftest_test_conftest_existing_resultlog.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_no_conftest_test_conftest_existing_resultlog.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 284, "end_line": 305, "span_ids": 
["test_conftest_existing_resultlog", "test_no_conftest"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_conftest(testdir):\n testdir.makeconftest(\"assert 0\")\n result = testdir.runpytest(\"--noconftest\")\n assert result.ret == EXIT_NOTESTSCOLLECTED\n\n result = testdir.runpytest()\n assert result.ret == EXIT_USAGEERROR\n\n\ndef test_conftest_existing_resultlog(testdir):\n x = testdir.mkdir(\"tests\")\n x.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", action=\"store_true\")\n \"\"\"\n )\n )\n testdir.makefile(ext=\".log\", result=\"\") # Writes result.log\n result = testdir.runpytest(\"-h\", \"--resultlog\", \"result.log\")\n result.stdout.fnmatch_lines([\"*--xyz*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_existing_junitxml_test_conftest_existing_junitxml.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_existing_junitxml_test_conftest_existing_junitxml.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 308, "end_line": 320, "span_ids": ["test_conftest_existing_junitxml"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_existing_junitxml(testdir):\n x = testdir.mkdir(\"tests\")\n x.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", action=\"store_true\")\n \"\"\"\n )\n )\n testdir.makefile(ext=\".xml\", junit=\"\") # Writes junit.xml\n result = testdir.runpytest(\"-h\", \"--junitxml\", \"junit.xml\")\n result.stdout.fnmatch_lines([\"*--xyz*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_import_order_test_conftest_import_order.assert_conftest__getconft": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_import_order_test_conftest_import_order.assert_conftest__getconft", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 323, "end_line": 335, "span_ids": ["test_conftest_import_order"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_import_order(testdir, monkeypatch):\n ct1 = testdir.makeconftest(\"\")\n sub = testdir.mkdir(\"sub\")\n ct2 = sub.join(\"conftest.py\")\n ct2.write(\"\")\n\n def impct(p):\n return p\n\n conftest = PytestPluginManager()\n conftest._confcutdir = testdir.tmpdir\n monkeypatch.setattr(conftest, \"_importconftest\", impct)\n assert conftest._getconftestmodules(sub) == [ct1, ct2]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_fixture_dependency_test_fixture_dependency.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_fixture_dependency_test_fixture_dependency.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 338, "end_line": 380, "span_ids": ["test_fixture_dependency"], "tokens": 248}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fixture_dependency(testdir, monkeypatch):\n ct1 = testdir.makeconftest(\"\")\n ct1 = testdir.makepyfile(\"__init__.py\")\n ct1.write(\"\")\n sub = testdir.mkdir(\"sub\")\n sub.join(\"__init__.py\").write(\"\")\n sub.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture\n def not_needed():\n assert False, \"Should not be called!\"\n\n @pytest.fixture\n def foo():\n assert False, \"Should not be called!\"\n\n @pytest.fixture\n def bar(foo):\n return 'bar'\n \"\"\"\n )\n )\n subsub = sub.mkdir(\"subsub\")\n subsub.join(\"__init__.py\").write(\"\")\n subsub.join(\"test_bar.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n\n @pytest.fixture\n def bar():\n return 'sub bar'\n\n def test_event_fixture(bar):\n assert bar == 'sub bar'\n \"\"\"\n )\n )\n result = testdir.runpytest(\"sub\")\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_found_with_double_dash_test_conftest_found_with_double_dash.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_conftest_found_with_double_dash_test_conftest_found_with_double_dash.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 383, "end_line": 400, "span_ids": ["test_conftest_found_with_double_dash"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_conftest_found_with_double_dash(testdir):\n sub = testdir.mkdir(\"sub\")\n sub.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_addoption(parser):\n parser.addoption(\"--hello-world\", action=\"store_true\")\n \"\"\"\n )\n )\n p = sub.join(\"test_hello.py\")\n p.write(\"def test_hello(): pass\")\n result = testdir.runpytest(str(p) + \"::test_hello\", \"-h\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *--hello-world*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility_TestConftestVisibility._setup_tree.return._runner_runner_packa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility_TestConftestVisibility._setup_tree.return._runner_runner_packa", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 403, "end_line": 465, "span_ids": ["TestConftestVisibility", "TestConftestVisibility._setup_tree"], "tokens": 434}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConftestVisibility(object):\n def _setup_tree(self, testdir): # for issue616\n # example mostly taken from:\n # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html\n runner = testdir.mkdir(\"empty\")\n package = testdir.mkdir(\"package\")\n\n package.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def fxtr():\n return \"from-package\"\n \"\"\"\n )\n )\n package.join(\"test_pkgroot.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_pkgroot(fxtr):\n assert fxtr == \"from-package\"\n \"\"\"\n )\n )\n\n swc = package.mkdir(\"swc\")\n swc.join(\"__init__.py\").ensure()\n swc.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def fxtr():\n return \"from-swc\"\n \"\"\"\n )\n )\n swc.join(\"test_with_conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_with_conftest(fxtr):\n assert fxtr == \"from-swc\"\n \"\"\"\n )\n )\n\n snc = package.mkdir(\"snc\")\n snc.join(\"__init__.py\").ensure()\n snc.join(\"test_no_conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_no_conftest(fxtr):\n assert fxtr == \"from-package\" # No local conftest.py, so should\n # use value from parent dir's\n \"\"\"\n )\n )\n print(\"created directory structure:\")\n for x in testdir.tmpdir.visit():\n print(\" \" + x.relto(testdir.tmpdir))\n\n return {\"runner\": runner, \"package\": package, \"swc\": swc, \"snc\": snc}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility._N_B_swc_stands_for__TestConftestVisibility.test_parsefactories_relative_node_ids.with_dirs_chdir_as_cwd_.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_TestConftestVisibility._N_B_swc_stands_for__TestConftestVisibility.test_parsefactories_relative_node_ids.with_dirs_chdir_as_cwd_.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 467, "end_line": 504, "span_ids": ["TestConftestVisibility._setup_tree", "TestConftestVisibility.test_parsefactories_relative_node_ids"], "tokens": 419}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConftestVisibility(object):\n\n # N.B.: \"swc\" stands for \"subdir with conftest.py\"\n # \"snc\" stands for \"subdir no [i.e. without] conftest.py\"\n @pytest.mark.parametrize(\n \"chdir,testarg,expect_ntests_passed\",\n [\n # Effective target: package/..\n (\"runner\", \"..\", 3),\n (\"package\", \"..\", 3),\n (\"swc\", \"../..\", 3),\n (\"snc\", \"../..\", 3),\n # Effective target: package\n (\"runner\", \"../package\", 3),\n (\"package\", \".\", 3),\n (\"swc\", \"..\", 3),\n (\"snc\", \"..\", 3),\n # Effective target: package/swc\n (\"runner\", \"../package/swc\", 1),\n (\"package\", \"./swc\", 1),\n (\"swc\", \".\", 1),\n (\"snc\", \"../swc\", 1),\n # Effective target: package/snc\n (\"runner\", \"../package/snc\", 1),\n (\"package\", \"./snc\", 1),\n (\"swc\", \"../snc\", 1),\n (\"snc\", \".\", 1),\n ],\n )\n @pytest.mark.issue(616)\n def test_parsefactories_relative_node_ids(\n self, testdir, chdir, testarg, expect_ntests_passed\n ):\n dirs = self._setup_tree(testdir)\n print(\"pytest run in cwd: %s\" % (dirs[chdir].relto(testdir.tmpdir)))\n print(\"pytestarg : %s\" % (testarg))\n print(\"expected pass : %s\" % (expect_ntests_passed))\n with dirs[chdir].as_cwd():\n reprec = testdir.inline_run(testarg, \"-q\", \"--traceconfig\")\n reprec.assertoutcome(passed=expect_ntests_passed)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_search_conftest_up_to_inifile_test_search_conftest_up_to_inifile.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_search_conftest_up_to_inifile_test_search_conftest_up_to_inifile.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 507, "end_line": 555, "span_ids": ["test_search_conftest_up_to_inifile"], "tokens": 349}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"confcutdir,passed,error\", [(\".\", 2, 0), (\"src\", 1, 1), (None, 1, 1)]\n)\ndef test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):\n \"\"\"Test that 
conftest files are detected only up to an ini file, unless\n an explicit --confcutdir option is given.\n \"\"\"\n root = testdir.tmpdir\n src = root.join(\"src\").ensure(dir=1)\n src.join(\"pytest.ini\").write(\"[pytest]\")\n src.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def fix1(): pass\n \"\"\"\n )\n )\n src.join(\"test_foo.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_1(fix1):\n pass\n def test_2(out_of_reach):\n pass\n \"\"\"\n )\n )\n root.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import pytest\n @pytest.fixture\n def out_of_reach(): pass\n \"\"\"\n )\n )\n\n args = [str(src)]\n if confcutdir:\n args = [\"--confcutdir=%s\" % root.join(confcutdir)]\n result = testdir.runpytest(*args)\n match = \"\"\n if passed:\n match += \"*%d passed*\" % passed\n if error:\n match += \"*%d error*\" % error\n result.stdout.fnmatch_lines(match)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_issue1073_conftest_special_objects_test_conftest_exception_handling.assert_raise_ValueError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_issue1073_conftest_special_objects_test_conftest_exception_handling.assert_raise_ValueError_", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 558, "end_line": 592, "span_ids": ["test_issue1073_conftest_special_objects", "test_conftest_exception_handling"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_issue1073_conftest_special_objects(testdir):\n testdir.makeconftest(\n \"\"\"\\\n class DontTouchMe(object):\n def __getattr__(self, x):\n raise Exception('cant touch me')\n\n x = DontTouchMe()\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\\\n def test_some():\n pass\n \"\"\"\n )\n res = testdir.runpytest()\n assert res.ret == 0\n\n\ndef test_conftest_exception_handling(testdir):\n testdir.makeconftest(\n \"\"\"\\\n raise ValueError()\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\\\n def test_some():\n pass\n \"\"\"\n )\n res = testdir.runpytest()\n assert res.ret == 4\n assert \"raise ValueError()\" in [line.strip() for line in res.errlines]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_hook_proxy_test_hook_proxy.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_hook_proxy_test_hook_proxy.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 595, "end_line": 614, "span_ids": ["test_hook_proxy"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_hook_proxy(testdir):\n \"\"\"Session's gethookproxy() would cache conftests incorrectly (#2016).\n It was decided to remove the cache altogether.\n \"\"\"\n testdir.makepyfile(\n **{\n \"root/demo-0/test_foo1.py\": \"def test1(): pass\",\n \"root/demo-a/test_foo2.py\": \"def test1(): pass\",\n \"root/demo-a/conftest.py\": \"\"\"\\\n def pytest_ignore_collect(path, config):\n return True\n \"\"\",\n \"root/demo-b/test_foo3.py\": \"def test1(): pass\",\n \"root/demo-c/test_foo4.py\": \"def test1(): pass\",\n }\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*test_foo1.py*\", \"*test_foo3.py*\", \"*test_foo4.py*\", \"*3 passed*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_required_option_help_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_conftest.py_test_required_option_help_", "embedding": null, "metadata": {"file_path": "testing/test_conftest.py", "file_name": "test_conftest.py", "file_type": "text/x-python", "category": "test", "start_line": 617, "end_line": 631, "span_ids": ["test_required_option_help"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_required_option_help(testdir):\n testdir.makeconftest(\"assert 0\")\n x = testdir.mkdir(\"x\")\n x.join(\"conftest.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def pytest_addoption(parser):\n parser.addoption(\"--xyz\", action=\"store_true\", required=True)\n \"\"\"\n )\n )\n result = testdir.runpytest(\"-h\", x)\n assert \"argument --xyz is required\" not in result.stdout.str()\n assert \"general:\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py__encoding_utf_8_TestDoctests.test_collect_testtextfile.assert_len_items_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py__encoding_utf_8_TestDoctests.test_collect_testtextfile.assert_len_items_0", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 36, "span_ids": ["TestDoctests.test_collect_testtextfile", "imports", "TestDoctests", "docstring"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# encoding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import 
print_function\n\nimport sys\nimport textwrap\n\nimport pytest\nfrom _pytest.compat import MODULE_NOT_FOUND_ERROR\nfrom _pytest.doctest import DoctestItem\nfrom _pytest.doctest import DoctestModule\nfrom _pytest.doctest import DoctestTextfile\n\n\nclass TestDoctests(object):\n def test_collect_testtextfile(self, testdir):\n w = testdir.maketxtfile(whatever=\"\")\n checkfile = testdir.maketxtfile(\n test_something=\"\"\"\n alskdjalsdk\n >>> i = 5\n >>> i-1\n 4\n \"\"\"\n )\n\n for x in (testdir.tmpdir, checkfile):\n # print \"checking that %s returns custom items\" % (x,)\n items, reprec = testdir.inline_genitems(x)\n assert len(items) == 1\n assert isinstance(items[0], DoctestItem)\n assert isinstance(items[0].parent, DoctestTextfile)\n # Empty file has no items.\n items, reprec = testdir.inline_genitems(w)\n assert len(items) == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_empty_TestDoctests.test_collect_module_single_modulelevel_doctest.for_p_in_path_testdir_t.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_empty_TestDoctests.test_collect_module_single_modulelevel_doctest.for_p_in_path_testdir_t.None_3", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 38, "end_line": 50, "span_ids": ["TestDoctests.test_collect_module_empty", "TestDoctests.test_collect_module_single_modulelevel_doctest"], "tokens": 164}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_collect_module_empty(self, testdir):\n path = testdir.makepyfile(whatever=\"#\")\n for p in (path, testdir.tmpdir):\n items, reprec = testdir.inline_genitems(p, \"--doctest-modules\")\n assert len(items) == 0\n\n def test_collect_module_single_modulelevel_doctest(self, testdir):\n path = testdir.makepyfile(whatever='\"\"\">>> pass\"\"\"')\n for p in (path, testdir.tmpdir):\n items, reprec = testdir.inline_genitems(p, \"--doctest-modules\")\n assert len(items) == 1\n assert isinstance(items[0], DoctestItem)\n assert isinstance(items[0].parent, DoctestModule)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_one_modulelevel_TestDoctests.test_collect_module_two_doctest_one_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_one_modulelevel_TestDoctests.test_collect_module_two_doctest_one_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 52, 
"end_line": 66, "span_ids": ["TestDoctests.test_collect_module_two_doctest_one_modulelevel"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_collect_module_two_doctest_one_modulelevel(self, testdir):\n path = testdir.makepyfile(\n whatever=\"\"\"\n '>>> x = None'\n def my_func():\n \">>> magic = 42 \"\n \"\"\"\n )\n for p in (path, testdir.tmpdir):\n items, reprec = testdir.inline_genitems(p, \"--doctest-modules\")\n assert len(items) == 2\n assert isinstance(items[0], DoctestItem)\n assert isinstance(items[1], DoctestItem)\n assert isinstance(items[0].parent, DoctestModule)\n assert items[0].parent is items[1].parent", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_no_modulelevel_TestDoctests.test_collect_module_two_doctest_no_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_collect_module_two_doctest_no_modulelevel_TestDoctests.test_collect_module_two_doctest_no_modulelevel.for_p_in_path_testdir_t.assert_items_0_parent_is", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 68, "end_line": 92, "span_ids": ["TestDoctests.test_collect_module_two_doctest_no_modulelevel"], "tokens": 201}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_collect_module_two_doctest_no_modulelevel(self, testdir):\n path = testdir.makepyfile(\n whatever=\"\"\"\n '# Empty'\n def my_func():\n \">>> magic = 42 \"\n def unuseful():\n '''\n # This is a function\n # >>> # it doesn't have any doctest\n '''\n def another():\n '''\n # This is another function\n >>> import os # this one does have a doctest\n '''\n \"\"\"\n )\n for p in (path, testdir.tmpdir):\n items, reprec = testdir.inline_genitems(p, \"--doctest-modules\")\n assert len(items) == 2\n assert isinstance(items[0], DoctestItem)\n assert isinstance(items[1], DoctestItem)\n assert isinstance(items[0].parent, DoctestModule)\n assert items[0].parent is items[1].parent", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_simple_doctestfile_TestDoctests.test_new_pattern.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_simple_doctestfile_TestDoctests.test_new_pattern.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": 
"testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 94, "end_line": 114, "span_ids": ["TestDoctests.test_new_pattern", "TestDoctests.test_simple_doctestfile"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_simple_doctestfile(self, testdir):\n p = testdir.maketxtfile(\n test_doc=\"\"\"\n >>> x = 1\n >>> x == 1\n False\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n reprec.assertoutcome(failed=1)\n\n def test_new_pattern(self, testdir):\n p = testdir.maketxtfile(\n xdoc=\"\"\"\n >>> x = 1\n >>> x == 1\n False\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-glob=x*.txt\")\n reprec.assertoutcome(failed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_multiple_patterns_TestDoctests.test_multiple_patterns.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_multiple_patterns_TestDoctests.test_multiple_patterns.None_4", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 116, "end_line": 144, "span_ids": ["TestDoctests.test_multiple_patterns"], "tokens": 231}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_multiple_patterns(self, testdir):\n \"\"\"Test support for multiple --doctest-glob arguments (#1255).\n \"\"\"\n testdir.maketxtfile(\n xdoc=\"\"\"\n >>> 1\n 1\n \"\"\"\n )\n testdir.makefile(\n \".foo\",\n test=\"\"\"\n >>> 1\n 1\n \"\"\",\n )\n testdir.maketxtfile(\n test_normal=\"\"\"\n >>> 1\n 1\n \"\"\"\n )\n expected = {\"xdoc.txt\", \"test.foo\", \"test_normal.txt\"}\n assert {x.basename for x in testdir.tmpdir.listdir()} == expected\n args = [\"--doctest-glob=xdoc*.txt\", \"--doctest-glob=*.foo\"]\n result = testdir.runpytest(*args)\n result.stdout.fnmatch_lines([\"*test.foo *\", \"*xdoc.txt *\", \"*2 passed*\"])\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*test_normal.txt *\", \"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_encoding_TestDoctests.test_encoding.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_encoding_TestDoctests.test_encoding.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", 
"start_line": 146, "end_line": 171, "span_ids": ["TestDoctests.test_encoding"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n @pytest.mark.parametrize(\n \" test_string, encoding\",\n [(u\"foo\", \"ascii\"), (u\"\u00f6\u00e4\u00fc\", \"latin1\"), (u\"\u00f6\u00e4\u00fc\", \"utf-8\")],\n )\n def test_encoding(self, testdir, test_string, encoding):\n \"\"\"Test support for doctest_encoding ini option.\n \"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_encoding={}\n \"\"\".format(\n encoding\n )\n )\n doctest = u\"\"\"\n >>> u\"{}\"\n {}\n \"\"\".format(\n test_string, repr(test_string)\n )\n testdir._makefile(\".txt\", [doctest], {}, encoding=encoding)\n\n result = testdir.runpytest()\n\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unexpected_exception_TestDoctests.test_doctest_skip.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unexpected_exception_TestDoctests.test_doctest_skip.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 201, "span_ids": ["TestDoctests.test_doctest_unexpected_exception", "TestDoctests.test_doctest_skip"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctest_unexpected_exception(self, testdir):\n testdir.maketxtfile(\n \"\"\"\n >>> i = 0\n >>> 0 / i\n 2\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines(\n [\n \"*unexpected_exception*\",\n \"*>>> i = 0*\",\n \"*>>> 0 / i*\",\n \"*UNEXPECTED*ZeroDivision*\",\n ]\n )\n\n def test_doctest_skip(self, testdir):\n testdir.maketxtfile(\n \"\"\"\n >>> 1\n 1\n >>> import pytest\n >>> pytest.skip(\"\")\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines([\"*1 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_partial_context_around_error_TestDoctests.test_docstring_partial_context_around_error.assert_text_line_after_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_partial_context_around_error_TestDoctests.test_docstring_partial_context_around_error.assert_text_line_after_", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", 
"file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 203, "end_line": 245, "span_ids": ["TestDoctests.test_docstring_partial_context_around_error"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_docstring_partial_context_around_error(self, testdir):\n \"\"\"Test that we show some context before the actual line of a failing\n doctest.\n \"\"\"\n testdir.makepyfile(\n '''\n def foo():\n \"\"\"\n text-line-1\n text-line-2\n text-line-3\n text-line-4\n text-line-5\n text-line-6\n text-line-7\n text-line-8\n text-line-9\n text-line-10\n text-line-11\n >>> 1 + 1\n 3\n\n text-line-after\n \"\"\"\n '''\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines(\n [\n \"*docstring_partial_context_around_error*\",\n \"005*text-line-3\",\n \"006*text-line-4\",\n \"013*text-line-11\",\n \"014*>>> 1 + 1\",\n \"Expected:\",\n \" 3\",\n \"Got:\",\n \" 2\",\n ]\n )\n # lines below should be trimmed out\n assert \"text-line-2\" not in result.stdout.str()\n assert \"text-line-after\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_full_context_around_error_TestDoctests.test_docstring_full_context_around_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_docstring_full_context_around_error_TestDoctests.test_docstring_full_context_around_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 275, "span_ids": ["TestDoctests.test_docstring_full_context_around_error"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_docstring_full_context_around_error(self, testdir):\n \"\"\"Test that we show the whole context before the actual line of a failing\n doctest, provided that the context is up to 10 lines long.\n \"\"\"\n testdir.makepyfile(\n '''\n def foo():\n \"\"\"\n text-line-1\n text-line-2\n\n >>> 1 + 1\n 3\n \"\"\"\n '''\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines(\n [\n \"*docstring_full_context_around_error*\",\n \"003*text-line-1\",\n \"004*text-line-2\",\n \"006*>>> 1 + 1\",\n \"Expected:\",\n \" 3\",\n \"Got:\",\n \" 2\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_linedata_missing_TestDoctests.test_doctest_linedata_missing.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_linedata_missing_TestDoctests.test_doctest_linedata_missing.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 277, "end_line": 300, "span_ids": ["TestDoctests.test_doctest_linedata_missing"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctest_linedata_missing(self, testdir):\n testdir.tmpdir.join(\"hello.py\").write(\n textwrap.dedent(\n \"\"\"\\\n class Fun(object):\n @property\n def test(self):\n '''\n >>> a = 1\n >>> 1/0\n '''\n \"\"\"\n )\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines(\n [\n \"*hello*\",\n \"*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*\",\n \"*1/0*\",\n \"*UNEXPECTED*ZeroDivision*\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_only_txt_TestDoctests.test_doctest_unex_importerror_only_txt.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_only_txt_TestDoctests.test_doctest_unex_importerror_only_txt.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 302, "end_line": 317, "span_ids": ["TestDoctests.test_doctest_unex_importerror_only_txt"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctest_unex_importerror_only_txt(self, testdir):\n testdir.maketxtfile(\n \"\"\"\n >>> import asdalsdkjaslkdjasd\n >>>\n \"\"\"\n )\n result = testdir.runpytest()\n # doctest is never executed because of error during hello.py collection\n result.stdout.fnmatch_lines(\n [\n \"*>>> import asdals*\",\n \"*UNEXPECTED*{e}*\".format(e=MODULE_NOT_FOUND_ERROR),\n \"{e}: No module named *asdal*\".format(e=MODULE_NOT_FOUND_ERROR),\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_with_module_TestDoctests.test_doctest_unex_importerror_with_module.result_stdout_fnmatch_lin": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctest_unex_importerror_with_module_TestDoctests.test_doctest_unex_importerror_with_module.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 319, "end_line": 341, "span_ids": ["TestDoctests.test_doctest_unex_importerror_with_module"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctest_unex_importerror_with_module(self, testdir):\n testdir.tmpdir.join(\"hello.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import asdalsdkjaslkdjasd\n \"\"\"\n )\n )\n testdir.maketxtfile(\n \"\"\"\n >>> import hello\n >>>\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n # doctest is never executed because of error during hello.py collection\n result.stdout.fnmatch_lines(\n [\n \"*ERROR collecting hello.py*\",\n \"*{e}: No module named *asdals*\".format(e=MODULE_NOT_FOUND_ERROR),\n \"*Interrupted: 1 errors during collection*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_TestDoctests.test_doctestmodule_external_and_issue116.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_TestDoctests.test_doctestmodule_external_and_issue116.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 343, "end_line": 382, "span_ids": ["TestDoctests.test_doctestmodule_external_and_issue116", "TestDoctests.test_doctestmodule"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctestmodule(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n '''\n >>> x = 1\n >>> x == 1\n False\n\n '''\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(failed=1)\n\n def test_doctestmodule_external_and_issue116(self, testdir):\n p = testdir.mkpydir(\"hello\")\n p.join(\"__init__.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def somefunc():\n '''\n >>> i = 0\n >>> i + 1\n 2\n '''\n \"\"\"\n )\n )\n result = testdir.runpytest(p, \"--doctest-modules\")\n result.stdout.fnmatch_lines(\n [\n \"003 *>>> i = 0\",\n \"004 *>>> i + 1\",\n \"*Expected:\",\n \"* 2\",\n \"*Got:\",\n \"* 1\",\n \"*:4: DocTestFailure\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_failing_TestDoctests.test_txtfile_with_fixtures.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_failing_TestDoctests.test_txtfile_with_fixtures.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 384, "end_line": 414, "span_ids": ["TestDoctests.test_txtfile_with_fixtures", "TestDoctests.test_txtfile_failing"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_txtfile_failing(self, testdir):\n p = testdir.maketxtfile(\n \"\"\"\n >>> i = 0\n >>> i + 1\n 2\n \"\"\"\n )\n result = testdir.runpytest(p, \"-s\")\n result.stdout.fnmatch_lines(\n [\n \"001 >>> i = 0\",\n \"002 >>> i + 1\",\n \"Expected:\",\n \" 2\",\n \"Got:\",\n \" 1\",\n \"*test_txtfile_failing.txt:2: DocTestFailure\",\n ]\n )\n\n def test_txtfile_with_fixtures(self, testdir):\n p = testdir.maketxtfile(\n \"\"\"\n >>> dir = getfixture('tmpdir')\n >>> type(dir).__name__\n 'LocalPath'\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_with_usefixtures_in_ini_TestDoctests.test_doctestmodule_with_fixtures.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_txtfile_with_usefixtures_in_ini_TestDoctests.test_doctestmodule_with_fixtures.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 416, "end_line": 453, "span_ids": ["TestDoctests.test_txtfile_with_usefixtures_in_ini", "TestDoctests.test_doctestmodule_with_fixtures"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_txtfile_with_usefixtures_in_ini(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n usefixtures = myfixture\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture\n def myfixture(monkeypatch):\n monkeypatch.setenv(\"HELLO\", \"WORLD\")\n \"\"\"\n )\n\n p = testdir.maketxtfile(\n \"\"\"\n >>> import os\n >>> os.environ[\"HELLO\"]\n 'WORLD'\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n reprec.assertoutcome(passed=1)\n\n def test_doctestmodule_with_fixtures(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n '''\n >>> dir = getfixture('tmpdir')\n >>> type(dir).__name__\n 'LocalPath'\n '''\n \"\"\"\n )\n reprec = testdir.inline_run(p, 
\"--doctest-modules\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_three_tests_TestDoctests.test_doctestmodule_three_tests.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_three_tests_TestDoctests.test_doctestmodule_three_tests.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 455, "end_line": 480, "span_ids": ["TestDoctests.test_doctestmodule_three_tests"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctestmodule_three_tests(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n '''\n >>> dir = getfixture('tmpdir')\n >>> type(dir).__name__\n 'LocalPath'\n '''\n def my_func():\n '''\n >>> magic = 42\n >>> magic - 42\n 0\n '''\n def unuseful():\n pass\n def another():\n '''\n >>> import os\n >>> os is os\n True\n '''\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_two_tests_one_fail_TestDoctests.test_doctestmodule_two_tests_one_fail.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_doctestmodule_two_tests_one_fail_TestDoctests.test_doctestmodule_two_tests_one_fail.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 501, "span_ids": ["TestDoctests.test_doctestmodule_two_tests_one_fail"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_doctestmodule_two_tests_one_fail(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n class MyClass(object):\n def bad_meth(self):\n '''\n >>> magic = 42\n >>> magic\n 0\n '''\n def nice_meth(self):\n '''\n >>> magic = 42\n >>> magic - 42\n 0\n '''\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(failed=1, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_TestDoctests.test_ignored_whitespace.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_TestDoctests.test_ignored_whitespace.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 503, "end_line": 522, "span_ids": ["TestDoctests.test_ignored_whitespace"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_ignored_whitespace(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE\n \"\"\"\n )\n p = testdir.makepyfile(\n \"\"\"\n class MyClass(object):\n '''\n >>> a = \"foo \"\n >>> print(a)\n foo\n '''\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_TestDoctests.test_non_ignored_whitespace.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_TestDoctests.test_non_ignored_whitespace.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 524, "end_line": 543, "span_ids": ["TestDoctests.test_non_ignored_whitespace"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_non_ignored_whitespace(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ELLIPSIS\n \"\"\"\n )\n p = testdir.makepyfile(\n \"\"\"\n class MyClass(object):\n '''\n >>> a = \"foo \"\n >>> print(a)\n foo\n '''\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(failed=1, passed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_glob_TestDoctests.test_ignored_whitespace_glob.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignored_whitespace_glob_TestDoctests.test_ignored_whitespace_glob.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": 
"test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 545, "end_line": 560, "span_ids": ["TestDoctests.test_ignored_whitespace_glob"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_ignored_whitespace_glob(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE\n \"\"\"\n )\n p = testdir.maketxtfile(\n xdoc=\"\"\"\n >>> a = \"foo \"\n >>> print(a)\n foo\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-glob=x*.txt\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_glob_TestDoctests.test_non_ignored_whitespace_glob.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_non_ignored_whitespace_glob_TestDoctests.test_non_ignored_whitespace_glob.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 577, "span_ids": ["TestDoctests.test_non_ignored_whitespace_glob"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_non_ignored_whitespace_glob(self, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ELLIPSIS\n \"\"\"\n )\n p = testdir.maketxtfile(\n xdoc=\"\"\"\n >>> a = \"foo \"\n >>> print(a)\n foo\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-glob=x*.txt\")\n reprec.assertoutcome(failed=1, passed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_contains_unicode_TestDoctests.test_contains_unicode.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_contains_unicode_TestDoctests.test_contains_unicode.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 579, "end_line": 593, "span_ids": ["TestDoctests.test_contains_unicode"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
TestDoctests(object):\n\n def test_contains_unicode(self, testdir):\n \"\"\"Fix internal error with docstrings containing non-ascii characters.\n \"\"\"\n testdir.makepyfile(\n u'''\n # encoding: utf-8\n def foo():\n \"\"\"\n >>> name = '\u0441' # not letter 'c' but instead Cyrillic 's'.\n 'anything'\n \"\"\"\n '''\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines([\"Got nothing\", \"* 1 failed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignore_import_errors_on_doctest_TestDoctests.test_ignore_import_errors_on_doctest.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_ignore_import_errors_on_doctest_TestDoctests.test_ignore_import_errors_on_doctest.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 595, "end_line": 612, "span_ids": ["TestDoctests.test_ignore_import_errors_on_doctest"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_ignore_import_errors_on_doctest(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import asdf\n\n def add_one(x):\n '''\n >>> add_one(1)\n 2\n '''\n return x + 1\n \"\"\"\n )\n\n reprec = testdir.inline_run(\n p, \"--doctest-modules\", \"--doctest-ignore-import-errors\"\n )\n reprec.assertoutcome(skipped=1, failed=1, passed=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_junit_report_for_doctest_TestDoctests.test_junit_report_for_doctest.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_junit_report_for_doctest_TestDoctests.test_junit_report_for_doctest.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 614, "end_line": 629, "span_ids": ["TestDoctests.test_junit_report_for_doctest"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_junit_report_for_doctest(self, testdir):\n \"\"\"\n #713: Fix --junit-xml option when used with --doctest-modules.\n \"\"\"\n p = testdir.makepyfile(\n \"\"\"\n def foo():\n '''\n >>> 1 + 1\n 3\n '''\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\", \"--junit-xml=junit.xml\")\n reprec.assertoutcome(failed=1)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_TestDoctests.test_unicode_doctest.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_TestDoctests.test_unicode_doctest.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 631, "end_line": 652, "span_ids": ["TestDoctests.test_unicode_doctest"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_unicode_doctest(self, testdir):\n \"\"\"\n Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii\n characters.\n \"\"\"\n p = testdir.maketxtfile(\n test_unicode_doctest=\"\"\"\n .. doctest::\n\n >>> print(\n ... \"Hi\\\\n\\\\nBy\u00e9\")\n Hi\n ...\n By\u00e9\n >>> 1/0 # By\u00e9\n 1\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\"*UNEXPECTED EXCEPTION: ZeroDivisionError*\", \"*1 failed*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_module_TestDoctests.test_unicode_doctest_module.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_unicode_doctest_module_TestDoctests.test_unicode_doctest_module.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 654, "end_line": 673, "span_ids": ["TestDoctests.test_unicode_doctest_module"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_unicode_doctest_module(self, testdir):\n \"\"\"\n Test case for issue 2434: DecodeError on Python 2 when doctest docstring\n contains non-ascii characters.\n \"\"\"\n p = testdir.makepyfile(\n test_unicode_doctest_module=\"\"\"\n # -*- encoding: utf-8 -*-\n from __future__ import unicode_literals\n\n def fix_bad_unicode(text):\n '''\n >>> print(fix_bad_unicode('\u00c3\u00banico'))\n \u00fanico\n '''\n return \"\u00fanico\"\n \"\"\"\n )\n result = testdir.runpytest(p, \"--doctest-modules\")\n result.stdout.fnmatch_lines([\"* 1 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_print_unicode_value_TestDoctests.test_print_unicode_value.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_print_unicode_value_TestDoctests.test_print_unicode_value.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 675, "end_line": 689, "span_ids": ["TestDoctests.test_print_unicode_value"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_print_unicode_value(self, testdir):\n \"\"\"\n Test case for issue 3583: Printing Unicode in doctest under Python 2.7\n doesn't work\n \"\"\"\n p = testdir.maketxtfile(\n test_print_unicode_value=r\"\"\"\n Here is a doctest::\n\n >>> print(u'\\xE5\\xE9\\xEE\\xF8\\xFC')\n \u00e5\u00e9\u00ee\u00f8\u00fc\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"* 1 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_reportinfo_TestDoctests.test_reportinfo.assert_reportinfo_1_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_reportinfo_TestDoctests.test_reportinfo.assert_reportinfo_1_1", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 691, "end_line": 707, "span_ids": ["TestDoctests.test_reportinfo"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_reportinfo(self, testdir):\n \"\"\"\n Test case to make sure that DoctestItem.reportinfo() returns lineno.\n \"\"\"\n p = testdir.makepyfile(\n test_reportinfo=\"\"\"\n def foo(x):\n '''\n >>> foo('a')\n 'b'\n '''\n return 'c'\n \"\"\"\n )\n items, reprec = testdir.inline_genitems(p, \"--doctest-modules\")\n reportinfo = items[0].reportinfo()\n assert reportinfo[1] == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_valid_setup_py_TestDoctests.test_invalid_setup_py.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctests.test_valid_setup_py_TestDoctests.test_invalid_setup_py.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", 
"category": "test", "start_line": 709, "end_line": 739, "span_ids": ["TestDoctests.test_invalid_setup_py", "TestDoctests.test_valid_setup_py"], "tokens": 224}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctests(object):\n\n def test_valid_setup_py(self, testdir):\n \"\"\"\n Test to make sure that pytest ignores valid setup.py files when ran\n with --doctest-modules\n \"\"\"\n p = testdir.makepyfile(\n setup=\"\"\"\n from setuptools import setup, find_packages\n setup(name='sample',\n version='0.0',\n description='description',\n packages=find_packages()\n )\n \"\"\"\n )\n result = testdir.runpytest(p, \"--doctest-modules\")\n result.stdout.fnmatch_lines([\"*collected 0 items*\"])\n\n def test_invalid_setup_py(self, testdir):\n \"\"\"\n Test to make sure that pytest reads setup.py files that are not used\n for python packages when ran with --doctest-modules\n \"\"\"\n p = testdir.makepyfile(\n setup=\"\"\"\n def test_foo():\n return 'bar'\n \"\"\"\n )\n result = testdir.runpytest(p, \"--doctest-modules\")\n result.stdout.fnmatch_lines([\"*collected 1 item*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals_TestLiterals.test_allow_unicode.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals_TestLiterals.test_allow_unicode.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 742, "end_line": 780, "span_ids": ["TestLiterals.test_allow_unicode", "TestLiterals"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLiterals(object):\n @pytest.mark.parametrize(\"config_mode\", [\"ini\", \"comment\"])\n def test_allow_unicode(self, testdir, config_mode):\n \"\"\"Test that doctests which output unicode work in all python versions\n tested by pytest when the ALLOW_UNICODE option is used (either in\n the ini file or by an inline comment).\n \"\"\"\n if config_mode == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ALLOW_UNICODE\n \"\"\"\n )\n comment = \"\"\n else:\n comment = \"#doctest: +ALLOW_UNICODE\"\n\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> b'12'.decode('ascii') {comment}\n '12'\n \"\"\".format(\n comment=comment\n )\n )\n testdir.makepyfile(\n foo=\"\"\"\n def foo():\n '''\n >>> b'12'.decode('ascii') {comment}\n '12'\n '''\n \"\"\".format(\n comment=comment\n )\n )\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_allow_bytes_TestLiterals.test_allow_bytes.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_allow_bytes_TestLiterals.test_allow_bytes.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 782, "end_line": 819, "span_ids": ["TestLiterals.test_allow_bytes"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLiterals(object):\n\n @pytest.mark.parametrize(\"config_mode\", [\"ini\", \"comment\"])\n def test_allow_bytes(self, testdir, config_mode):\n \"\"\"Test that doctests which output bytes work in all python versions\n tested by pytest when the ALLOW_BYTES option is used (either in\n the ini file or by an inline comment)(#1287).\n \"\"\"\n if config_mode == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n doctest_optionflags = ALLOW_BYTES\n \"\"\"\n )\n comment = \"\"\n else:\n comment = \"#doctest: +ALLOW_BYTES\"\n\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> b'foo' {comment}\n 'foo'\n \"\"\".format(\n comment=comment\n )\n )\n testdir.makepyfile(\n foo=\"\"\"\n def foo():\n '''\n >>> b'foo' {comment}\n 'foo'\n '''\n \"\"\".format(\n comment=comment\n )\n )\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_unicode_string_TestLiterals.test_unicode_string.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_unicode_string_TestLiterals.test_unicode_string.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 821, "end_line": 834, "span_ids": ["TestLiterals.test_unicode_string"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLiterals(object):\n\n def test_unicode_string(self, testdir):\n \"\"\"Test that doctests which output unicode fail in Python 2 when\n the ALLOW_UNICODE option is not used. 
The same test should pass\n in Python 3.\n \"\"\"\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> b'12'.decode('ascii')\n '12'\n \"\"\"\n )\n reprec = testdir.inline_run()\n passed = int(sys.version_info[0] >= 3)\n reprec.assertoutcome(passed=passed, failed=int(not passed))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_bytes_literal_TestLiterals.test_bytes_literal.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestLiterals.test_bytes_literal_TestLiterals.test_bytes_literal.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 836, "end_line": 849, "span_ids": ["TestLiterals.test_bytes_literal"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestLiterals(object):\n\n def test_bytes_literal(self, testdir):\n \"\"\"Test that doctests which output bytes fail in Python 3 when\n the ALLOW_BYTES option is not used. The same test should pass\n in Python 2 (#1287).\n \"\"\"\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> b'foo'\n 'foo'\n \"\"\"\n )\n reprec = testdir.inline_run()\n passed = int(sys.version_info[0] == 2)\n reprec.assertoutcome(passed=passed, failed=int(not passed))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips_TestDoctestSkips.test_vacuous_all_skipped.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips_TestDoctestSkips.test_vacuous_all_skipped.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 852, "end_line": 909, "span_ids": ["TestDoctestSkips.test_one_skipped_failed", "TestDoctestSkips.test_all_skipped", "TestDoctestSkips.test_one_skipped", "TestDoctestSkips", "TestDoctestSkips.test_vacuous_all_skipped", "TestDoctestSkips.makedoctest"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestSkips(object):\n \"\"\"\n If all examples in a doctest are skipped due to the SKIP option, then\n the tests should be SKIPPED rather than PASSED. 
(#957)\n \"\"\"\n\n @pytest.fixture(params=[\"text\", \"module\"])\n def makedoctest(self, testdir, request):\n def makeit(doctest):\n mode = request.param\n if mode == \"text\":\n testdir.maketxtfile(doctest)\n else:\n assert mode == \"module\"\n testdir.makepyfile('\"\"\"\\n%s\"\"\"' % doctest)\n\n return makeit\n\n def test_one_skipped(self, testdir, makedoctest):\n makedoctest(\n \"\"\"\n >>> 1 + 1 # doctest: +SKIP\n 2\n >>> 2 + 2\n 4\n \"\"\"\n )\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(passed=1)\n\n def test_one_skipped_failed(self, testdir, makedoctest):\n makedoctest(\n \"\"\"\n >>> 1 + 1 # doctest: +SKIP\n 2\n >>> 2 + 2\n 200\n \"\"\"\n )\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(failed=1)\n\n def test_all_skipped(self, testdir, makedoctest):\n makedoctest(\n \"\"\"\n >>> 1 + 1 # doctest: +SKIP\n 2\n >>> 2 + 2 # doctest: +SKIP\n 200\n \"\"\"\n )\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(skipped=1)\n\n def test_vacuous_all_skipped(self, testdir, makedoctest):\n makedoctest(\"\")\n reprec = testdir.inline_run(\"--doctest-modules\")\n reprec.assertoutcome(passed=0, skipped=0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips.test_continue_on_failure_TestDoctestSkips.test_continue_on_failure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestSkips.test_continue_on_failure_TestDoctestSkips.test_continue_on_failure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 911, "end_line": 930, "span_ids": ["TestDoctestSkips.test_continue_on_failure"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestSkips(object):\n\n def test_continue_on_failure(self, testdir):\n testdir.maketxtfile(\n test_something=\"\"\"\n >>> i = 5\n >>> def foo():\n ... raise ValueError('error1')\n >>> foo()\n >>> i\n >>> i + 2\n 7\n >>> i + 1\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\", \"--doctest-continue-on-failure\")\n result.assert_outcomes(passed=0, failed=1)\n # The lines that contains the failure are 4, 5, and 8. 
The first one\n # is a stack trace and the other two are mismatches.\n result.stdout.fnmatch_lines(\n [\"*4: UnexpectedException*\", \"*5: DocTestFailure*\", \"*8: DocTestFailure*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures_TestDoctestAutoUseFixtures.test_doctest_module_session_fixture.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures_TestDoctestAutoUseFixtures.test_doctest_module_session_fixture.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 933, "end_line": 971, "span_ids": ["TestDoctestAutoUseFixtures", "TestDoctestAutoUseFixtures.test_doctest_module_session_fixture"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestAutoUseFixtures(object):\n\n SCOPES = [\"module\", \"session\", \"class\", \"function\"]\n\n def test_doctest_module_session_fixture(self, testdir):\n \"\"\"Test that session fixtures are initialized for doctest modules (#768)\n \"\"\"\n # session fixture which changes some global data, which will\n # be accessed by doctests in a module\n testdir.makeconftest(\n \"\"\"\n import pytest\n import sys\n\n @pytest.yield_fixture(autouse=True, scope='session')\n def myfixture():\n assert not hasattr(sys, 'pytest_session_data')\n sys.pytest_session_data = 1\n yield\n del sys.pytest_session_data\n \"\"\"\n )\n testdir.makepyfile(\n foo=\"\"\"\n import sys\n\n def foo():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n\n def bar():\n '''\n >>> assert sys.pytest_session_data == 1\n '''\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_scopes_TestDoctestAutoUseFixtures.test_fixture_scopes.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_scopes_TestDoctestAutoUseFixtures.test_fixture_scopes.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 973, "end_line": 1004, "span_ids": ["TestDoctestAutoUseFixtures.test_fixture_scopes"], "tokens": 238}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestAutoUseFixtures(object):\n\n 
@pytest.mark.parametrize(\"scope\", SCOPES)\n @pytest.mark.parametrize(\"enable_doctest\", [True, False])\n def test_fixture_scopes(self, testdir, scope, enable_doctest):\n \"\"\"Test that auto-use fixtures work properly with doctest modules.\n See #1057 and #1100.\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(autouse=True, scope=\"{scope}\")\n def auto(request):\n return 99\n \"\"\".format(\n scope=scope\n )\n )\n testdir.makepyfile(\n test_1='''\n def test_foo():\n \"\"\"\n >>> getfixture('auto') + 1\n 100\n \"\"\"\n def test_bar():\n assert 1\n '''\n )\n params = (\"--doctest-modules\",) if enable_doctest else ()\n passes = 3 if enable_doctest else 2\n result = testdir.runpytest(*params)\n result.stdout.fnmatch_lines([\"*=== %d passed in *\" % passes])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes_TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1006, "end_line": 1042, "span_ids": ["TestDoctestAutoUseFixtures.test_fixture_module_doctest_scopes"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestAutoUseFixtures(object):\n\n @pytest.mark.parametrize(\"scope\", SCOPES)\n @pytest.mark.parametrize(\"autouse\", [True, False])\n @pytest.mark.parametrize(\"use_fixture_in_doctest\", [True, False])\n def test_fixture_module_doctest_scopes(\n self, testdir, scope, autouse, use_fixture_in_doctest\n ):\n \"\"\"Test that auto-use fixtures work properly with doctest files.\n See #1057 and #1100.\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(autouse={autouse}, scope=\"{scope}\")\n def auto(request):\n return 99\n \"\"\".format(\n scope=scope, autouse=autouse\n )\n )\n if use_fixture_in_doctest:\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> getfixture('auto')\n 99\n \"\"\"\n )\n else:\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> 1 + 1\n 2\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n assert \"FAILURES\" not in str(result.stdout.str())\n result.stdout.fnmatch_lines([\"*=== 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_auto_use_request_attributes_TestDoctestAutoUseFixtures.test_auto_use_request_attributes.result_stdout_fnmatch_lin": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestAutoUseFixtures.test_auto_use_request_attributes_TestDoctestAutoUseFixtures.test_auto_use_request_attributes.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1044, "end_line": 1074, "span_ids": ["TestDoctestAutoUseFixtures.test_auto_use_request_attributes"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestAutoUseFixtures(object):\n\n @pytest.mark.parametrize(\"scope\", SCOPES)\n def test_auto_use_request_attributes(self, testdir, scope):\n \"\"\"Check that all attributes of a request in an autouse fixture\n behave as expected when requested for a doctest item.\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture(autouse=True, scope=\"{scope}\")\n def auto(request):\n if \"{scope}\" == 'module':\n assert request.module is None\n if \"{scope}\" == 'class':\n assert request.cls is None\n if \"{scope}\" == 'function':\n assert request.function is None\n return 99\n \"\"\".format(\n scope=scope\n )\n )\n testdir.maketxtfile(\n test_doc=\"\"\"\n >>> 1 + 1\n 2\n \"\"\"\n )\n result = testdir.runpytest(\"--doctest-modules\")\n assert \"FAILURES\" not in str(result.stdout.str())\n result.stdout.fnmatch_lines([\"*=== 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture_TestDoctestNamespaceFixture.test_namespace_doctestfile.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture_TestDoctestNamespaceFixture.test_namespace_doctestfile.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1077, "end_line": 1106, "span_ids": ["TestDoctestNamespaceFixture", "TestDoctestNamespaceFixture.test_namespace_doctestfile"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestNamespaceFixture(object):\n\n SCOPES = [\"module\", \"session\", \"class\", \"function\"]\n\n @pytest.mark.parametrize(\"scope\", SCOPES)\n def test_namespace_doctestfile(self, testdir, scope):\n \"\"\"\n Check that inserting something into the namespace works in a\n simple text file doctest\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope=\"{scope}\")\n def add_contextlib(doctest_namespace):\n doctest_namespace['cl'] = contextlib\n \"\"\".format(\n scope=scope\n )\n )\n p = testdir.maketxtfile(\n \"\"\"\n >>> print(cl.__name__)\n contextlib\n \"\"\"\n )\n 
reprec = testdir.inline_run(p)\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture.test_namespace_pyfile_TestDoctestNamespaceFixture.test_namespace_pyfile.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestNamespaceFixture.test_namespace_pyfile_TestDoctestNamespaceFixture.test_namespace_pyfile.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1108, "end_line": 1136, "span_ids": ["TestDoctestNamespaceFixture.test_namespace_pyfile"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestNamespaceFixture(object):\n\n @pytest.mark.parametrize(\"scope\", SCOPES)\n def test_namespace_pyfile(self, testdir, scope):\n \"\"\"\n Check that inserting something into the namespace works in a\n simple Python file docstring doctest\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import pytest\n import contextlib\n\n @pytest.fixture(autouse=True, scope=\"{scope}\")\n def add_contextlib(doctest_namespace):\n doctest_namespace['cl'] = contextlib\n \"\"\".format(\n scope=scope\n )\n )\n p = testdir.makepyfile(\n \"\"\"\n def foo():\n '''\n >>> print(cl.__name__)\n contextlib\n '''\n \"\"\"\n )\n reprec = testdir.inline_run(p, \"--doctest-modules\")\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption_TestDoctestReportingOption._run_doctest_report.return.testdir_runpytest_doct": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption_TestDoctestReportingOption._run_doctest_report.return.testdir_runpytest_doct", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1139, "end_line": 1157, "span_ids": ["TestDoctestReportingOption._run_doctest_report", "TestDoctestReportingOption"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestReportingOption(object):\n def _run_doctest_report(self, testdir, format):\n testdir.makepyfile(\n \"\"\"\n def foo():\n '''\n >>> foo()\n a b\n 0 1 4\n 1 2 4\n 2 3 6\n '''\n print(' a b\\\\n'\n '0 1 4\\\\n'\n '1 2 5\\\\n'\n '2 3 6')\n \"\"\"\n )\n return testdir.runpytest(\"--doctest-modules\", \"--doctest-report\", format)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_udiff_TestDoctestReportingOption.test_doctest_report_udiff.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_udiff_TestDoctestReportingOption.test_doctest_report_udiff.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1159, "end_line": 1164, "span_ids": ["TestDoctestReportingOption.test_doctest_report_udiff"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestReportingOption(object):\n\n @pytest.mark.parametrize(\"format\", [\"udiff\", \"UDIFF\", \"uDiFf\"])\n def test_doctest_report_udiff(self, testdir, format):\n result = self._run_doctest_report(testdir, format)\n result.stdout.fnmatch_lines(\n [\" 0 1 4\", \" -1 2 4\", \" +1 2 5\", \" 2 3 6\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_cdiff_TestDoctestReportingOption.test_doctest_report_cdiff.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_cdiff_TestDoctestReportingOption.test_doctest_report_cdiff.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1166, "end_line": 1180, "span_ids": ["TestDoctestReportingOption.test_doctest_report_cdiff"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestReportingOption(object):\n\n def test_doctest_report_cdiff(self, testdir):\n result = self._run_doctest_report(testdir, \"cdiff\")\n result.stdout.fnmatch_lines(\n [\n \" a b\",\n \" 0 1 4\",\n \" ! 1 2 4\",\n \" 2 3 6\",\n \" --- 1,4 ----\",\n \" a b\",\n \" 0 1 4\",\n \" ! 
1 2 5\",\n \" 2 3 6\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_ndiff_TestDoctestReportingOption.test_doctest_report_ndiff.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_ndiff_TestDoctestReportingOption.test_doctest_report_ndiff.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1182, "end_line": 1194, "span_ids": ["TestDoctestReportingOption.test_doctest_report_ndiff"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestReportingOption(object):\n\n def test_doctest_report_ndiff(self, testdir):\n result = self._run_doctest_report(testdir, \"ndiff\")\n result.stdout.fnmatch_lines(\n [\n \" a b\",\n \" 0 1 4\",\n \" - 1 2 4\",\n \" ? ^\",\n \" + 1 2 5\",\n \" ? ^\",\n \" 2 3 6\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_none_or_only_first_failure_TestDoctestReportingOption.test_doctest_report_invalid.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_TestDoctestReportingOption.test_doctest_report_none_or_only_first_failure_TestDoctestReportingOption.test_doctest_report_invalid.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1196, "end_line": 1220, "span_ids": ["TestDoctestReportingOption.test_doctest_report_invalid", "TestDoctestReportingOption.test_doctest_report_none_or_only_first_failure"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDoctestReportingOption(object):\n\n @pytest.mark.parametrize(\"format\", [\"none\", \"only_first_failure\"])\n def test_doctest_report_none_or_only_first_failure(self, testdir, format):\n result = self._run_doctest_report(testdir, format)\n result.stdout.fnmatch_lines(\n [\n \"Expected:\",\n \" a b\",\n \" 0 1 4\",\n \" 1 2 4\",\n \" 2 3 6\",\n \"Got:\",\n \" a b\",\n \" 0 1 4\",\n \" 1 2 5\",\n \" 2 3 6\",\n ]\n )\n\n def test_doctest_report_invalid(self, testdir):\n result = self._run_doctest_report(testdir, \"obviously_invalid_format\")\n result.stderr.fnmatch_lines(\n [\n \"*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose 
from*\"\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_test_doctest_mock_objects_dont_recurse_missbehaved_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_doctest.py_test_doctest_mock_objects_dont_recurse_missbehaved_", "embedding": null, "metadata": {"file_path": "testing/test_doctest.py", "file_name": "test_doctest.py", "file_type": "text/x-python", "category": "test", "start_line": 1223, "end_line": 1240, "span_ids": ["test_doctest_mock_objects_dont_recurse_missbehaved"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mock_module\", [\"mock\", \"unittest.mock\"])\ndef test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):\n pytest.importorskip(mock_module)\n testdir.makepyfile(\n \"\"\"\n from {mock_module} import call\n class Example(object):\n '''\n >>> 1 + 1\n 2\n '''\n \"\"\".format(\n mock_module=mock_module\n )\n )\n result = testdir.runpytest(\"--doctest-modules\")\n result.stdout.fnmatch_lines([\"* 1 passed *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_entry_points.py_from___future___import_ab_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_entry_points.py_from___future___import_ab_", "embedding": null, "metadata": {"file_path": "testing/test_entry_points.py", "file_name": "test_entry_points.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 18, "span_ids": ["imports", "test_entry_point_exist", "test_pytest_entry_points_are_identical"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pkg_resources\n\nimport pytest\n\n\n@pytest.mark.parametrize(\"entrypoint\", [\"py.test\", \"pytest\"])\ndef test_entry_point_exist(entrypoint):\n assert entrypoint in pkg_resources.get_entry_map(\"pytest\")[\"console_scripts\"]\n\n\ndef test_pytest_entry_points_are_identical():\n entryMap = pkg_resources.get_entry_map(\"pytest\")[\"console_scripts\"]\n assert entryMap[\"pytest\"].module_name == entryMap[\"py.test\"].module_name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_helpconfig.py_from___future___import_ab_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_helpconfig.py_from___future___import_ab_", "embedding": null, "metadata": 
{"file_path": "testing/test_helpconfig.py", "file_name": "test_helpconfig.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 78, "span_ids": ["test_traceconfig", "test_help", "test_PYTEST_DEBUG", "test_hookvalidation_optional", "imports", "test_hookvalidation_unknown", "test_version", "test_debug"], "tokens": 510}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\n\n\ndef test_version(testdir, pytestconfig):\n result = testdir.runpytest(\"--version\")\n assert result.ret == 0\n # p = py.path.local(py.__file__).dirpath()\n result.stderr.fnmatch_lines(\n [\"*pytest*{}*imported from*\".format(pytest.__version__)]\n )\n if pytestconfig.pluginmanager.list_plugin_distinfo():\n result.stderr.fnmatch_lines([\"*setuptools registered plugins:\", \"*at*\"])\n\n\ndef test_help(testdir):\n result = testdir.runpytest(\"--help\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n \"\"\"\n *-v*verbose*\n *setup.cfg*\n *minversion*\n *to see*markers*pytest --markers*\n *to see*fixtures*pytest --fixtures*\n \"\"\"\n )\n\n\ndef test_hookvalidation_unknown(testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_hello(xyz):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret != 0\n result.stdout.fnmatch_lines([\"*unknown hook*pytest_hello*\"])\n\n\ndef test_hookvalidation_optional(testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.hookimpl(optionalhook=True)\n def pytest_hello(xyz):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n\n\ndef test_traceconfig(testdir):\n result = testdir.runpytest(\"--traceconfig\")\n result.stdout.fnmatch_lines([\"*using*pytest*py*\", \"*active plugins*\"])\n\n\ndef test_debug(testdir, monkeypatch):\n result = testdir.runpytest_subprocess(\"--debug\")\n assert result.ret == EXIT_NOTESTSCOLLECTED\n p = testdir.tmpdir.join(\"pytestdebug.log\")\n assert \"pytest_sessionstart\" in p.read()\n\n\ndef test_PYTEST_DEBUG(testdir, monkeypatch):\n monkeypatch.setenv(\"PYTEST_DEBUG\", \"1\")\n result = testdir.runpytest_subprocess()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stderr.fnmatch_lines(\n [\"*pytest_plugin_registered*\", \"*manager*PluginManager*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py__coding_utf_8__assert_attr.assert_on_node_expecte": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py__coding_utf_8__assert_attr.assert_on_node_expecte", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 34, "span_ids": ["imports", "assert_attr", "docstring", "runandparse"], "tokens": 227}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nfrom xml.dom import minidom\n\nimport py\n\nimport pytest\nfrom _pytest.junitxml import LogXML\nfrom _pytest.reports import BaseReport\n\n\ndef runandparse(testdir, *args):\n resultpath = testdir.tmpdir.join(\"junit.xml\")\n result = testdir.runpytest(\"--junitxml=%s\" % resultpath, *args)\n xmldoc = minidom.parse(str(resultpath))\n return result, DomNode(xmldoc)\n\n\ndef assert_attr(node, **kwargs):\n __tracebackhide__ = True\n\n def nodeval(node, name):\n anode = node.getAttributeNode(name)\n if anode is not None:\n return anode.value\n\n expected = {name: str(value) for name, value in kwargs.items()}\n on_node = {name: nodeval(node, name) for name in expected}\n assert on_node == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_DomNode_DomNode.next_siebling.return.type_self_self___node_ne": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_DomNode_DomNode.next_siebling.return.type_self_self___node_ne", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 37, "end_line": 85, "span_ids": ["DomNode.find_by_tag", "DomNode.assert_attr", "DomNode._by_tag", "DomNode.find_nth_by_tag", "DomNode.find_first_by_tag", "DomNode.__getitem__", "DomNode.next_siebling", "DomNode.text", "DomNode.toxml", "DomNode", "DomNode.tag", "DomNode.__repr__"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class DomNode(object):\n def __init__(self, dom):\n self.__node = dom\n\n def __repr__(self):\n return self.__node.toxml()\n\n def find_first_by_tag(self, tag):\n return self.find_nth_by_tag(tag, 0)\n\n def _by_tag(self, tag):\n return self.__node.getElementsByTagName(tag)\n\n def find_nth_by_tag(self, tag, n):\n items = self._by_tag(tag)\n try:\n nth = items[n]\n except IndexError:\n pass\n else:\n return type(self)(nth)\n\n def find_by_tag(self, tag):\n t = type(self)\n return [t(x) for x in self.__node.getElementsByTagName(tag)]\n\n def __getitem__(self, key):\n node = self.__node.getAttributeNode(key)\n if node is not None:\n return node.value\n\n def assert_attr(self, **kwargs):\n __tracebackhide__ = True\n return assert_attr(self.__node, **kwargs)\n\n def toxml(self):\n return self.__node.toxml()\n\n @property\n def text(self):\n return self.__node.childNodes[0].wholeText\n\n @property\n def tag(self):\n return self.__node.tagName\n\n @property\n def next_siebling(self):\n return type(self)(self.__node.nextSibling)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython_TestPython.test_summing_simple.node_assert_attr_name_py": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython_TestPython.test_summing_simple.node_assert_attr_name_py", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 110, "span_ids": ["TestPython.test_summing_simple", "TestPython"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n def test_summing_simple(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_pass():\n pass\n def test_fail():\n assert 0\n def test_skip():\n pytest.skip(\"\")\n @pytest.mark.xfail\n def test_xfail():\n assert 0\n @pytest.mark.xfail\n def test_xpass():\n assert 1\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(name=\"pytest\", errors=0, failures=1, skipped=2, tests=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_summing_simple_with_errors_TestPython.test_summing_simple_with_errors.node_assert_attr_name_py": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_summing_simple_with_errors_TestPython.test_summing_simple_with_errors.node_assert_attr_name_py", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 112, "end_line": 136, "span_ids": ["TestPython.test_summing_simple_with_errors"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_summing_simple_with_errors(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def fixture():\n raise Exception()\n def test_pass():\n pass\n def test_fail():\n assert 0\n def test_error(fixture):\n pass\n @pytest.mark.xfail\n def test_xfail():\n assert False\n @pytest.mark.xfail(strict=True)\n def test_xpass():\n assert True\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(name=\"pytest\", errors=1, failures=2, skipped=1, tests=5)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_timing_function_TestPython.test_timing_function.assert_round_float_val_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_timing_function_TestPython.test_timing_function.assert_round_float_val_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 154, "span_ids": ["TestPython.test_timing_function"], "tokens": 131}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_timing_function(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import time, pytest\n def setup_module():\n time.sleep(0.01)\n def teardown_module():\n time.sleep(0.01)\n def test_sleep():\n time.sleep(0.01)\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n val = tnode[\"time\"]\n assert round(float(val), 2) >= 0.03", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_duration_report_TestPython.test_junit_duration_report.if_duration_report_to.else_.assert_val_1_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_duration_report_TestPython.test_junit_duration_report.if_duration_report_to.else_.assert_val_1_0", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 156, "end_line": 185, "span_ids": ["TestPython.test_junit_duration_report"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n @pytest.mark.parametrize(\"duration_report\", [\"call\", \"total\"])\n def test_junit_duration_report(self, testdir, monkeypatch, duration_report):\n\n # mock LogXML.node_reporter so it always sets a known duration to each test report object\n original_node_reporter = LogXML.node_reporter\n\n def node_reporter_wrapper(s, report):\n report.duration = 1.0\n reporter = original_node_reporter(s, report)\n return reporter\n\n monkeypatch.setattr(LogXML, \"node_reporter\", node_reporter_wrapper)\n\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n pass\n \"\"\"\n )\n result, dom = runandparse(\n testdir, \"-o\", \"junit_duration_report={}\".format(duration_report)\n )\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n val = float(tnode[\"time\"])\n if duration_report == \"total\":\n assert val == 3.0\n else:\n assert duration_report == \"call\"\n assert val == 1.0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_TestPython.test_setup_error.assert_ValueError_in_fn": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_TestPython.test_setup_error.assert_ValueError_in_fn", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 207, "span_ids": ["TestPython.test_setup_error"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_setup_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def arg(request):\n raise ValueError()\n def test_function(arg):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(errors=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_setup_error\", name=\"test_function\")\n fnode = tnode.find_first_by_tag(\"error\")\n fnode.assert_attr(message=\"test setup failure\")\n assert \"ValueError\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_teardown_error_TestPython.test_teardown_error.assert_ValueError_in_fn": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_teardown_error_TestPython.test_teardown_error.assert_ValueError_in_fn", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 209, "end_line": 229, "span_ids": ["TestPython.test_teardown_error"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_teardown_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def arg():\n yield\n raise ValueError()\n def test_function(arg):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_teardown_error\", name=\"test_function\")\n fnode = tnode.find_first_by_tag(\"error\")\n fnode.assert_attr(message=\"test teardown failure\")\n assert \"ValueError\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_call_failure_teardown_error_TestPython.test_call_failure_teardown_error.snode_assert_attr_message": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_call_failure_teardown_error_TestPython.test_call_failure_teardown_error.snode_assert_attr_message", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 231, "end_line": 254, "span_ids": ["TestPython.test_call_failure_teardown_error"], "tokens": 189}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_call_failure_teardown_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def arg():\n yield\n raise Exception(\"Teardown Exception\")\n def test_function(arg):\n raise Exception(\"Call Exception\")\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(errors=1, failures=1, tests=1)\n first, second = dom.find_by_tag(\"testcase\")\n if not first or not second or first == second:\n assert 0\n fnode = first.find_first_by_tag(\"failure\")\n fnode.assert_attr(message=\"Exception: Call Exception\")\n snode = second.find_first_by_tag(\"error\")\n snode.assert_attr(message=\"test teardown failure\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_skip_contains_name_reason_TestPython.test_skip_contains_name_reason.snode_assert_attr_type_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_skip_contains_name_reason_TestPython.test_skip_contains_name_reason.snode_assert_attr_type_p", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 256, "end_line": 271, "span_ids": ["TestPython.test_skip_contains_name_reason"], "tokens": 147}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_skip_contains_name_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_skip():\n pytest.skip(\"hello23\")\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_skip_contains_name_reason\", name=\"test_skip\")\n snode = tnode.find_first_by_tag(\"skipped\")\n snode.assert_attr(type=\"pytest.skip\", message=\"hello23\")", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_contains_name_reason_TestPython.test_mark_skip_contains_name_reason.snode_assert_attr_type_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_contains_name_reason_TestPython.test_mark_skip_contains_name_reason.snode_assert_attr_type_p", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 273, "end_line": 291, "span_ids": ["TestPython.test_mark_skip_contains_name_reason"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_mark_skip_contains_name_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip(reason=\"hello24\")\n def test_skip():\n assert True\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(\n classname=\"test_mark_skip_contains_name_reason\", name=\"test_skip\"\n )\n snode = tnode.find_first_by_tag(\"skipped\")\n snode.assert_attr(type=\"pytest.skip\", message=\"hello24\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skipif_contains_name_reason_TestPython.test_mark_skipif_contains_name_reason.snode_assert_attr_type_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skipif_contains_name_reason_TestPython.test_mark_skipif_contains_name_reason.snode_assert_attr_type_p", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 312, "span_ids": ["TestPython.test_mark_skipif_contains_name_reason"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_mark_skipif_contains_name_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n GLOBAL_CONDITION = True\n @pytest.mark.skipif(GLOBAL_CONDITION, reason=\"hello25\")\n def test_skip():\n assert True\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(\n classname=\"test_mark_skipif_contains_name_reason\", name=\"test_skip\"\n )\n snode = tnode.find_first_by_tag(\"skipped\")\n 
snode.assert_attr(type=\"pytest.skip\", message=\"hello25\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_doesnt_capture_output_TestPython.test_classname_instance.tnode_assert_attr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_mark_skip_doesnt_capture_output_TestPython.test_classname_instance.tnode_assert_attr_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 314, "end_line": 343, "span_ids": ["TestPython.test_mark_skip_doesnt_capture_output", "TestPython.test_classname_instance"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_mark_skip_doesnt_capture_output(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip(reason=\"foo\")\n def test_skip():\n print(\"bar!\")\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node_xml = dom.find_first_by_tag(\"testsuite\").toxml()\n assert \"bar!\" not in node_xml\n\n def test_classname_instance(self, testdir):\n testdir.makepyfile(\n \"\"\"\n class TestClass(object):\n def test_method(self):\n assert 0\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(failures=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(\n classname=\"test_classname_instance.TestClass\", name=\"test_method\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_classname_nested_dir_TestPython.test_internal_error.assert_Division_in_fnod": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_classname_nested_dir_TestPython.test_internal_error.assert_Division_in_fnod", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 345, "end_line": 366, "span_ids": ["TestPython.test_internal_error", "TestPython.test_classname_nested_dir"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_classname_nested_dir(self, testdir):\n p = testdir.tmpdir.ensure(\"sub\", \"test_hello.py\")\n p.write(\"def test_func(): 0/0\")\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(failures=1)\n tnode = 
node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"sub.test_hello\", name=\"test_func\")\n\n def test_internal_error(self, testdir):\n testdir.makeconftest(\"def pytest_runtest_protocol(): 0 / 0\")\n testdir.makepyfile(\"def test_function(): pass\")\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(errors=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"pytest\", name=\"internal\")\n fnode = tnode.find_first_by_tag(\"error\")\n fnode.assert_attr(message=\"internal error\")\n assert \"Division\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_function_TestPython.test_failure_function.if_junit_logging_syst.elif_junit_logging_no.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_function_TestPython.test_failure_function.if_junit_logging_syst.elif_junit_logging_no.None_1", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 368, "end_line": 410, "span_ids": ["TestPython.test_failure_function"], "tokens": 426}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n @pytest.mark.parametrize(\"junit_logging\", [\"no\", \"system-out\", \"system-err\"])\n def test_failure_function(self, testdir, junit_logging):\n testdir.makepyfile(\n \"\"\"\n import logging\n import sys\n\n def test_fail():\n print(\"hello-stdout\")\n sys.stderr.write(\"hello-stderr\\\\n\")\n logging.info('info msg')\n logging.warning('warning msg')\n raise ValueError(42)\n \"\"\"\n )\n\n result, dom = runandparse(testdir, \"-o\", \"junit_logging=%s\" % junit_logging)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(failures=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_failure_function\", name=\"test_fail\")\n fnode = tnode.find_first_by_tag(\"failure\")\n fnode.assert_attr(message=\"ValueError: 42\")\n assert \"ValueError\" in fnode.toxml()\n systemout = fnode.next_siebling\n assert systemout.tag == \"system-out\"\n assert \"hello-stdout\" in systemout.toxml()\n assert \"info msg\" not in systemout.toxml()\n systemerr = systemout.next_siebling\n assert systemerr.tag == \"system-err\"\n assert \"hello-stderr\" in systemerr.toxml()\n assert \"info msg\" not in systemerr.toxml()\n\n if junit_logging == \"system-out\":\n assert \"warning msg\" in systemout.toxml()\n assert \"warning msg\" not in systemerr.toxml()\n elif junit_logging == \"system-err\":\n assert \"warning msg\" not in systemout.toxml()\n assert \"warning msg\" in systemerr.toxml()\n elif junit_logging == \"no\":\n assert \"warning msg\" not in systemout.toxml()\n assert \"warning msg\" not in systemerr.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_verbose_message_TestPython.test_failure_verbose_message.fnode_assert_attr_message": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_verbose_message_TestPython.test_failure_verbose_message.fnode_assert_attr_message", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 412, "end_line": 425, "span_ids": ["TestPython.test_failure_verbose_message"], "tokens": 112}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_failure_verbose_message(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_fail():\n assert 0, \"An error\"\n \"\"\"\n )\n\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n fnode = tnode.find_first_by_tag(\"failure\")\n fnode.assert_attr(message=\"AssertionError: An error assert 0\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_escape_TestPython.test_failure_escape.for_index_char_in_enumer.assert_text_s_n_c": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_failure_escape_TestPython.test_failure_escape.for_index_char_in_enumer.assert_text_s_n_c", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 427, "end_line": 450, "span_ids": ["TestPython.test_failure_escape"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_failure_escape(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('arg1', \"<&'\", ids=\"<&'\")\n def test_func(arg1):\n print(arg1)\n assert 0\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(failures=3, tests=3)\n\n for index, char in enumerate(\"<&'\"):\n\n tnode = node.find_nth_by_tag(\"testcase\", index)\n tnode.assert_attr(\n classname=\"test_failure_escape\", name=\"test_func[%s]\" % char\n )\n sysout = tnode.find_first_by_tag(\"system-out\")\n text = sysout.text\n assert text == \"%s\\n\" % char", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_prefixing_TestPython.test_junit_prefixing.tnode_assert_attr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_junit_prefixing_TestPython.test_junit_prefixing.tnode_assert_attr_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 471, "span_ids": ["TestPython.test_junit_prefixing"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_junit_prefixing(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_func():\n assert 0\n class TestHello(object):\n def test_hello(self):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir, \"--junitprefix=xyz\")\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(failures=1, tests=2)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"xyz.test_junit_prefixing\", name=\"test_func\")\n tnode = node.find_nth_by_tag(\"testcase\", 1)\n tnode.assert_attr(\n classname=\"xyz.test_junit_prefixing.TestHello\", name=\"test_hello\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_function_TestPython.test_xfailure_function._assert_ValueError_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_function_TestPython.test_xfailure_function._assert_ValueError_in_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 473, "end_line": 489, "span_ids": ["TestPython.test_xfailure_function"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_xfailure_function(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_xfail():\n pytest.xfail(\"42\")\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert not result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_xfailure_function\", name=\"test_xfail\")\n fnode = tnode.find_first_by_tag(\"skipped\")\n fnode.assert_attr(type=\"pytest.xfail\", message=\"42\")\n # assert \"ValueError\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_marker_TestPython.test_xfailure_marker.fnode_assert_attr_type_p": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_marker_TestPython.test_xfailure_marker.fnode_assert_attr_type_p", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 491, "end_line": 507, "span_ids": ["TestPython.test_xfailure_marker"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_xfailure_marker(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(reason=\"42\")\n def test_xfail():\n assert False\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert not result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_xfailure_marker\", name=\"test_xfail\")\n fnode = tnode.find_first_by_tag(\"skipped\")\n fnode.assert_attr(type=\"pytest.xfail\", message=\"42\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfail_captures_output_once_TestPython.test_xfail_captures_output_once.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfail_captures_output_once_TestPython.test_xfail_captures_output_once.None_2", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 509, "end_line": 526, "span_ids": ["TestPython.test_xfail_captures_output_once"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_xfail_captures_output_once(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n import pytest\n\n @pytest.mark.xfail()\n def test_fail():\n sys.stdout.write('XFAIL This is stdout')\n sys.stderr.write('XFAIL This is stderr')\n assert 0\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n assert len(tnode.find_by_tag(\"system-err\")) == 1\n assert len(tnode.find_by_tag(\"system-out\")) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_TestPython.test_xfailure_xpass.tnode_assert_attr_classna": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_TestPython.test_xfailure_xpass.tnode_assert_attr_classna", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 542, "span_ids": ["TestPython.test_xfailure_xpass"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_xfailure_xpass(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail\n def test_xpass():\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n # assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=0, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_xfailure_xpass\", name=\"test_xpass\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_strict_TestPython.test_xfailure_xpass_strict.fnode_assert_attr_message": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_xfailure_xpass_strict_TestPython.test_xfailure_xpass_strict.fnode_assert_attr_message", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 544, "end_line": 560, "span_ids": ["TestPython.test_xfailure_xpass_strict"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_xfailure_xpass_strict(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(strict=True, reason=\"This needs to fail!\")\n def test_xpass():\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n # assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(skipped=0, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(classname=\"test_xfailure_xpass_strict\", name=\"test_xpass\")\n fnode = tnode.find_first_by_tag(\"failure\")\n fnode.assert_attr(message=\"[XPASS(strict)] This needs to fail!\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_collect_error_TestPython.test_unicode.if_not_sys_platform_start.assert_hx_in_fnode_toxm": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_collect_error_TestPython.test_unicode.if_not_sys_platform_start.assert_hx_in_fnode_toxm", "embedding": null, "metadata": {"file_path": 
"testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 562, "end_line": 589, "span_ids": ["TestPython.test_collect_error", "TestPython.test_unicode"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_collect_error(self, testdir):\n testdir.makepyfile(\"syntax error\")\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(errors=1, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n fnode = tnode.find_first_by_tag(\"error\")\n fnode.assert_attr(message=\"collection failure\")\n assert \"SyntaxError\" in fnode.toxml()\n\n def test_unicode(self, testdir):\n value = \"hx\\xc4\\x85\\xc4\\x87\\n\"\n testdir.makepyfile(\n \"\"\"\n # coding: latin1\n def test_hello():\n print(%r)\n assert 0\n \"\"\"\n % value\n )\n result, dom = runandparse(testdir)\n assert result.ret == 1\n tnode = dom.find_first_by_tag(\"testcase\")\n fnode = tnode.find_first_by_tag(\"failure\")\n if not sys.platform.startswith(\"java\"):\n assert \"hx\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_assertion_binchars_TestPython.test_pass_captures_stdout.assert_hello_stdout_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_assertion_binchars_TestPython.test_pass_captures_stdout.assert_hello_stdout_in_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 591, "end_line": 617, "span_ids": ["TestPython.test_pass_captures_stdout", "TestPython.test_assertion_binchars"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_assertion_binchars(self, testdir):\n \"\"\"this test did fail when the escaping wasnt strict\"\"\"\n testdir.makepyfile(\n \"\"\"\n\n M1 = '\\x01\\x02\\x03\\x04'\n M2 = '\\x01\\x02\\x03\\x05'\n\n def test_str_compare():\n assert M1 == M2\n \"\"\"\n )\n result, dom = runandparse(testdir)\n print(dom.toxml())\n\n def test_pass_captures_stdout(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_pass():\n print('hello-stdout')\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n pnode = node.find_first_by_tag(\"testcase\")\n systemout = pnode.find_first_by_tag(\"system-out\")\n assert \"hello-stdout\" in systemout.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_pass_captures_stderr_TestPython.test_pass_captures_stderr.assert_hello_stderr_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_pass_captures_stderr_TestPython.test_pass_captures_stderr.assert_hello_stderr_in_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 619, "end_line": 631, "span_ids": ["TestPython.test_pass_captures_stderr"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_pass_captures_stderr(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_pass():\n sys.stderr.write('hello-stderr')\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n pnode = node.find_first_by_tag(\"testcase\")\n systemout = pnode.find_first_by_tag(\"system-err\")\n assert \"hello-stderr\" in systemout.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stdout_TestPython.test_setup_error_captures_stdout.assert_hello_stdout_in_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stdout_TestPython.test_setup_error_captures_stdout.assert_hello_stdout_in_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 633, "end_line": 650, "span_ids": ["TestPython.test_setup_error_captures_stdout"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_setup_error_captures_stdout(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def arg(request):\n print('hello-stdout')\n raise ValueError()\n def test_function(arg):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n pnode = node.find_first_by_tag(\"testcase\")\n systemout = pnode.find_first_by_tag(\"system-out\")\n assert \"hello-stdout\" in systemout.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stderr_TestPython.test_setup_error_captures_stderr.assert_hello_stderr_in_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_setup_error_captures_stderr_TestPython.test_setup_error_captures_stderr.assert_hello_stderr_in_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 652, "end_line": 670, "span_ids": ["TestPython.test_setup_error_captures_stderr"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_setup_error_captures_stderr(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n import pytest\n\n @pytest.fixture\n def arg(request):\n sys.stderr.write('hello-stderr')\n raise ValueError()\n def test_function(arg):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n pnode = node.find_first_by_tag(\"testcase\")\n systemout = pnode.find_first_by_tag(\"system-err\")\n assert \"hello-stderr\" in systemout.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_avoid_double_stdout_TestPython.test_avoid_double_stdout.assert_hello_stdout_tear": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestPython.test_avoid_double_stdout_TestPython.test_avoid_double_stdout.assert_hello_stdout_tear", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 672, "end_line": 692, "span_ids": ["TestPython.test_avoid_double_stdout"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPython(object):\n\n def test_avoid_double_stdout(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import sys\n import pytest\n\n @pytest.fixture\n def arg(request):\n yield\n sys.stdout.write('hello-stdout teardown')\n raise ValueError()\n def test_function(arg):\n sys.stdout.write('hello-stdout call')\n \"\"\"\n )\n result, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testsuite\")\n pnode = node.find_first_by_tag(\"testcase\")\n systemout = pnode.find_first_by_tag(\"system-out\")\n assert \"hello-stdout call\" in systemout.toxml()\n assert \"hello-stdout teardown\" in systemout.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_mangle_test_address_test_dont_configure_on_slaves.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_mangle_test_address_test_dont_configure_on_slaves.None_1", "embedding": null, "metadata": {"file_path": 
"testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 695, "end_line": 726, "span_ids": ["test_dont_configure_on_slaves.FakeConfig", "test_dont_configure_on_slaves.FakeConfig.__init__", "test_dont_configure_on_slaves", "test_mangle_test_address"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mangle_test_address():\n from _pytest.junitxml import mangle_test_address\n\n address = \"::\".join([\"a/my.py.thing.py\", \"Class\", \"()\", \"method\", \"[a-1-::]\"])\n newnames = mangle_test_address(address)\n assert newnames == [\"a.my.py.thing\", \"Class\", \"method\", \"[a-1-::]\"]\n\n\ndef test_dont_configure_on_slaves(tmpdir):\n gotten = []\n\n class FakeConfig(object):\n def __init__(self):\n self.pluginmanager = self\n self.option = self\n\n def getini(self, name):\n return \"pytest\"\n\n junitprefix = None\n # XXX: shouldnt need tmpdir ?\n xmlpath = str(tmpdir.join(\"junix.xml\"))\n register = gotten.append\n\n fake_config = FakeConfig()\n from _pytest import junitxml\n\n junitxml.pytest_configure(fake_config)\n assert len(gotten) == 1\n FakeConfig.slaveinput = None\n junitxml.pytest_configure(fake_config)\n assert len(gotten) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestNonPython_TestNonPython.test_summing_simple.assert_custom_item_runte": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_TestNonPython_TestNonPython.test_summing_simple.assert_custom_item_runte", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 729, "end_line": 756, "span_ids": ["TestNonPython.test_summing_simple", "TestNonPython"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNonPython(object):\n def test_summing_simple(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_collect_file(path, parent):\n if path.ext == \".xyz\":\n return MyItem(path, parent)\n class MyItem(pytest.Item):\n def __init__(self, path, parent):\n super(MyItem, self).__init__(path.basename, parent)\n self.fspath = path\n def runtest(self):\n raise ValueError(42)\n def repr_failure(self, excinfo):\n return \"custom item runtest failed\"\n \"\"\"\n )\n testdir.tmpdir.join(\"myfile.xyz\").write(\"hello\")\n result, dom = runandparse(testdir)\n assert result.ret\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(errors=0, failures=1, skipped=0, tests=1)\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(name=\"myfile.xyz\")\n fnode = tnode.find_first_by_tag(\"failure\")\n fnode.assert_attr(message=\"custom item runtest failed\")\n assert \"custom item 
runtest failed\" in fnode.toxml()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_test_nullbyte.assert_x00_in_text": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_test_nullbyte.assert_x00_in_text", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 759, "end_line": 774, "span_ids": ["test_nullbyte"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nullbyte(testdir):\n # A null byte can not occur in XML (see section 2.2 of the spec)\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_print_nullbyte():\n sys.stdout.write('Here the null -->' + chr(0) + '<--')\n sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')\n assert False\n \"\"\"\n )\n xmlf = testdir.tmpdir.join(\"junit.xml\")\n testdir.runpytest(\"--junitxml=%s\" % xmlf)\n text = xmlf.read()\n assert \"\\x00\" not in text\n assert \"#x00\" in text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_replace_test_nullbyte_replace.assert_x0_in_text": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_nullbyte_replace_test_nullbyte_replace.assert_x0_in_text", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 777, "end_line": 791, "span_ids": ["test_nullbyte_replace"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nullbyte_replace(testdir):\n # Check if the null byte gets replaced\n testdir.makepyfile(\n \"\"\"\n import sys\n def test_print_nullbyte():\n sys.stdout.write('Here the null -->' + chr(0) + '<--')\n sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')\n assert False\n \"\"\"\n )\n xmlf = testdir.tmpdir.join(\"junit.xml\")\n testdir.runpytest(\"--junitxml=%s\" % xmlf)\n text = xmlf.read()\n assert \"#x0\" in text", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_invalid_xml_escape_test_invalid_xml_escape.for_i_in_valid_.assert_chr_i_bin_xml_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_invalid_xml_escape_test_invalid_xml_escape.for_i_in_valid_.assert_chr_i_bin_xml_", "embedding": null, "metadata": {"file_path": 
"testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 794, "end_line": 834, "span_ids": ["test_invalid_xml_escape"], "tokens": 366}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_invalid_xml_escape():\n # Test some more invalid xml chars, the full range should be\n # tested really but let's just thest the edges of the ranges\n # intead.\n # XXX This only tests low unicode character points for now as\n # there are some issues with the testing infrastructure for\n # the higher ones.\n # XXX Testing 0xD (\\r) is tricky as it overwrites the just written\n # line in the output, so we skip it too.\n global unichr\n try:\n unichr(65)\n except NameError:\n unichr = chr\n invalid = (\n 0x00,\n 0x1,\n 0xB,\n 0xC,\n 0xE,\n 0x19,\n 27, # issue #126\n 0xD800,\n 0xDFFF,\n 0xFFFE,\n 0x0FFFF,\n ) # , 0x110000)\n valid = (0x9, 0xA, 0x20)\n # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)\n\n from _pytest.junitxml import bin_xml_escape\n\n for i in invalid:\n got = bin_xml_escape(unichr(i)).uniobj\n if i <= 0xFF:\n expected = \"#x%02X\" % i\n else:\n expected = \"#x%04X\" % i\n assert got == expected\n for i in valid:\n assert chr(i) == bin_xml_escape(unichr(i)).uniobj", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_path_expansion_test_logxml_path_expansion.assert_xml_var_logfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_path_expansion_test_logxml_path_expansion.assert_xml_var_logfile_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 837, "end_line": 845, "span_ids": ["test_logxml_path_expansion"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_logxml_path_expansion(tmpdir, monkeypatch):\n home_tilde = py.path.local(os.path.expanduser(\"~\")).join(\"test.xml\")\n xml_tilde = LogXML(\"~%stest.xml\" % tmpdir.sep, None)\n assert xml_tilde.logfile == home_tilde\n\n monkeypatch.setenv(\"HOME\", str(tmpdir))\n home_var = os.path.normpath(os.path.expandvars(\"$HOME/test.xml\"))\n xml_var = LogXML(\"$HOME%stest.xml\" % tmpdir.sep, None)\n assert xml_var.logfile == home_var", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_changingdir_test_escaped_parametrized_names_xml.node_assert_attr_name_te": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_logxml_changingdir_test_escaped_parametrized_names_xml.node_assert_attr_name_te", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 848, "end_line": 893, "span_ids": ["test_logxml_changingdir", "test_logxml_makedir", "test_escaped_parametrized_names_xml", "test_logxml_check_isdir"], "tokens": 329}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_logxml_changingdir(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_func():\n import os\n os.chdir(\"a\")\n \"\"\"\n )\n testdir.tmpdir.mkdir(\"a\")\n result = testdir.runpytest(\"--junitxml=a/x.xml\")\n assert result.ret == 0\n assert testdir.tmpdir.join(\"a/x.xml\").check()\n\n\ndef test_logxml_makedir(testdir):\n \"\"\"--junitxml should automatically create directories for the xml file\"\"\"\n testdir.makepyfile(\n \"\"\"\n def test_pass():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--junitxml=path/to/results.xml\")\n assert result.ret == 0\n assert testdir.tmpdir.join(\"path/to/results.xml\").check()\n\n\ndef test_logxml_check_isdir(testdir):\n \"\"\"Give an error if --junit-xml is a directory (#2089)\"\"\"\n result = testdir.runpytest(\"--junit-xml=.\")\n result.stderr.fnmatch_lines([\"*--junitxml must be a filename*\"])\n\n\ndef test_escaped_parametrized_names_xml(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('char', [u\"\\\\x00\"])\n def test_func(char):\n assert char\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testcase\")\n node.assert_attr(name=\"test_func[\\\\x00]\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_function_issue469_test_double_colon_split_function_issue469.node_assert_attr_name_te": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_function_issue469_test_double_colon_split_function_issue469.node_assert_attr_name_te", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 896, "end_line": 909, "span_ids": ["test_double_colon_split_function_issue469"], "tokens": 111}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_double_colon_split_function_issue469(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('param', [\"double::colon\"])\n def test_func(param):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testcase\")\n 
node.assert_attr(classname=\"test_double_colon_split_function_issue469\")\n node.assert_attr(name=\"test_func[double::colon]\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_method_issue469_test_double_colon_split_method_issue469.node_assert_attr_name_te": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_double_colon_split_method_issue469_test_double_colon_split_method_issue469.node_assert_attr_name_te", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 912, "end_line": 926, "span_ids": ["test_double_colon_split_method_issue469"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_double_colon_split_method_issue469(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n class TestClass(object):\n @pytest.mark.parametrize('param', [\"double::colon\"])\n def test_func(self, param):\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testcase\")\n node.assert_attr(classname=\"test_double_colon_split_method_issue469.TestClass\")\n node.assert_attr(name=\"test_func[double::colon]\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_unicode_issue368_test_unicode_issue368.log_pytest_sessionfinish_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_unicode_issue368_test_unicode_issue368.log_pytest_sessionfinish_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 929, "end_line": 955, "span_ids": ["test_unicode_issue368", "test_unicode_issue368.Report", "test_unicode_issue368.Report:2"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unicode_issue368(testdir):\n path = testdir.tmpdir.join(\"test.xml\")\n log = LogXML(str(path), None)\n ustr = u\"\u0412\u041d\u0418!\"\n\n class Report(BaseReport):\n longrepr = ustr\n sections = []\n nodeid = \"something\"\n location = \"tests/filename.py\", 42, \"TestClass.method\"\n\n test_report = Report()\n\n # hopefully this is not too brittle ...\n log.pytest_sessionstart()\n node_reporter = log._opentestcase(test_report)\n node_reporter.append_failure(test_report)\n node_reporter.append_collect_error(test_report)\n node_reporter.append_collect_skipped(test_report)\n node_reporter.append_error(test_report)\n test_report.longrepr = \"filename\", 1, 
ustr\n node_reporter.append_skipped(test_report)\n test_report.longrepr = \"filename\", 1, \"Skipped: \u5361\u5623\u5623\"\n node_reporter.append_skipped(test_report)\n test_report.wasxfail = ustr\n node_reporter.append_skipped(test_report)\n log.pytest_sessionfinish()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_test_record_property.pnodes_1_assert_attr_nam": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_test_record_property.pnodes_1_assert_attr_nam", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 958, "end_line": 976, "span_ids": ["test_record_property"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_record_property(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def other(record_property):\n record_property(\"bar\", 1)\n def test_record(record_property, other):\n record_property(\"foo\", \"<1\");\n \"\"\"\n )\n result, dom = runandparse(testdir, \"-rwv\")\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n psnode = tnode.find_first_by_tag(\"properties\")\n pnodes = psnode.find_by_tag(\"property\")\n pnodes[0].assert_attr(name=\"bar\", value=\"1\")\n pnodes[1].assert_attr(name=\"foo\", value=\"<1\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_same_name_test_record_fixtures_without_junitxml.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_property_same_name_test_record_fixtures_without_junitxml.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 979, "end_line": 1007, "span_ids": ["test_record_property_same_name", "test_record_fixtures_without_junitxml"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_record_property_same_name(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_record_with_same_name(record_property):\n record_property(\"foo\", \"bar\")\n record_property(\"foo\", \"baz\")\n \"\"\"\n )\n result, dom = runandparse(testdir, \"-rw\")\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n psnode = tnode.find_first_by_tag(\"properties\")\n pnodes = psnode.find_by_tag(\"property\")\n pnodes[0].assert_attr(name=\"foo\", 
value=\"bar\")\n pnodes[1].assert_attr(name=\"foo\", value=\"baz\")\n\n\n@pytest.mark.parametrize(\"fixture_name\", [\"record_property\", \"record_xml_attribute\"])\ndef test_record_fixtures_without_junitxml(testdir, fixture_name):\n testdir.makepyfile(\n \"\"\"\n def test_record({fixture_name}):\n {fixture_name}(\"foo\", \"bar\")\n \"\"\".format(\n fixture_name=fixture_name\n )\n )\n result = testdir.runpytest()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_attribute_test_record_attribute.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_attribute_test_record_attribute.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1010, "end_line": 1036, "span_ids": ["test_record_attribute"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_record_attribute(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n junit_family = xunit1\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def other(record_xml_attribute):\n record_xml_attribute(\"bar\", 1)\n def test_record(record_xml_attribute, other):\n record_xml_attribute(\"foo\", \"<1\");\n \"\"\"\n )\n result, dom = runandparse(testdir, \"-rw\")\n node = dom.find_first_by_tag(\"testsuite\")\n tnode = node.find_first_by_tag(\"testcase\")\n tnode.assert_attr(bar=\"1\")\n tnode.assert_attr(foo=\"<1\")\n result.stdout.fnmatch_lines(\n [\"*test_record_attribute.py:6:*record_xml_attribute is an experimental feature\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_fixtures_xunit2_test_record_fixtures_xunit2.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_record_fixtures_xunit2_test_record_fixtures_xunit2.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1039, "end_line": 1076, "span_ids": ["test_record_fixtures_xunit2"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\n@pytest.mark.parametrize(\"fixture_name\", [\"record_xml_attribute\", \"record_property\"])\ndef test_record_fixtures_xunit2(testdir, fixture_name):\n \"\"\"Ensure record_xml_attribute and record_property 
drop values when outside of legacy family\n \"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n junit_family = xunit2\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def other({fixture_name}):\n {fixture_name}(\"bar\", 1)\n def test_record({fixture_name}, other):\n {fixture_name}(\"foo\", \"<1\");\n \"\"\".format(\n fixture_name=fixture_name\n )\n )\n\n result, dom = runandparse(testdir, \"-rw\")\n expected_lines = []\n if fixture_name == \"record_xml_attribute\":\n expected_lines.append(\n \"*test_record_fixtures_xunit2.py:6:*record_xml_attribute is an experimental feature\"\n )\n expected_lines = [\n \"*test_record_fixtures_xunit2.py:6:*{fixture_name} is incompatible \"\n \"with junit_family 'xunit2' (use 'legacy' or 'xunit1')\".format(\n fixture_name=fixture_name\n )\n ]\n result.stdout.fnmatch_lines(expected_lines)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_random_report_log_xdist_test_random_report_log_xdist.assert_failed_test_x": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_random_report_log_xdist_test_random_report_log_xdist.assert_failed_test_x", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1079, "end_line": 1101, "span_ids": ["test_random_report_log_xdist"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_random_report_log_xdist(testdir, monkeypatch):\n \"\"\"xdist calls pytest_runtest_logreport as they are executed by the slaves,\n with nodes from several nodes overlapping, so junitxml must cope with that\n to produce correct reports. 
#1064\n \"\"\"\n pytest.importorskip(\"xdist\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n testdir.makepyfile(\n \"\"\"\n import pytest, time\n @pytest.mark.parametrize('i', list(range(30)))\n def test_x(i):\n assert i != 22\n \"\"\"\n )\n _, dom = runandparse(testdir, \"-n2\")\n suite_node = dom.find_first_by_tag(\"testsuite\")\n failed = []\n for case_node in suite_node.find_by_tag(\"testcase\"):\n if case_node.find_first_by_tag(\"failure\"):\n failed.append(case_node[\"name\"])\n\n assert failed == [\"test_x[22]\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_runs_twice_test_runs_twice_xdist.assert_first_second": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_runs_twice_test_runs_twice_xdist.assert_first_second", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1104, "end_line": 1131, "span_ids": ["test_runs_twice_xdist", "test_runs_twice"], "tokens": 204}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_runs_twice(testdir):\n f = testdir.makepyfile(\n \"\"\"\n def test_pass():\n pass\n \"\"\"\n )\n\n result, dom = runandparse(testdir, f, f)\n assert \"INTERNALERROR\" not in result.stdout.str()\n first, second = [x[\"classname\"] for x in dom.find_by_tag(\"testcase\")]\n assert first == second\n\n\n@pytest.mark.xfail(reason=\"hangs\", run=False)\ndef test_runs_twice_xdist(testdir):\n pytest.importorskip(\"xdist\")\n f = testdir.makepyfile(\n \"\"\"\n def test_pass():\n pass\n \"\"\"\n )\n\n result, dom = runandparse(testdir, f, \"--dist\", \"each\", \"--tx\", \"2*popen\")\n assert \"INTERNALERROR\" not in result.stdout.str()\n first, second = [x[\"classname\"] for x in dom.find_by_tag(\"testcase\")]\n assert first == second", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_fancy_items_regression_test_fancy_items_regression.assert_items_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_fancy_items_regression_test_fancy_items_regression.assert_items_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1134, "end_line": 1183, "span_ids": ["test_fancy_items_regression"], "tokens": 289}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fancy_items_regression(testdir):\n # issue 1259\n testdir.makeconftest(\n \"\"\"\n import pytest\n class 
FunItem(pytest.Item):\n def runtest(self):\n pass\n class NoFunItem(pytest.Item):\n def runtest(self):\n pass\n\n class FunCollector(pytest.File):\n def collect(self):\n return [\n FunItem('a', self),\n NoFunItem('a', self),\n NoFunItem('b', self),\n ]\n\n def pytest_collect_file(path, parent):\n if path.check(ext='.py'):\n return FunCollector(path, parent)\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n def test_pass():\n pass\n \"\"\"\n )\n\n result, dom = runandparse(testdir)\n\n assert \"INTERNALERROR\" not in result.stdout.str()\n\n items = sorted(\"%(classname)s %(name)s\" % x for x in dom.find_by_tag(\"testcase\"))\n import pprint\n\n pprint.pprint(items)\n assert items == [\n u\"conftest a\",\n u\"conftest a\",\n u\"conftest b\",\n u\"test_fancy_items_regression a\",\n u\"test_fancy_items_regression a\",\n u\"test_fancy_items_regression b\",\n u\"test_fancy_items_regression test_pass\",\n ]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_global_properties_test_global_properties.assert_actual_expected": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_global_properties_test_global_properties.assert_actual_expected", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1186, "end_line": 1217, "span_ids": ["test_global_properties.Report", "test_global_properties", "test_global_properties.Report:2"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_global_properties(testdir):\n path = testdir.tmpdir.join(\"test_global_properties.xml\")\n log = LogXML(str(path), None)\n\n class Report(BaseReport):\n sections = []\n nodeid = \"test_node_id\"\n\n log.pytest_sessionstart()\n log.add_global_property(\"foo\", 1)\n log.add_global_property(\"bar\", 2)\n log.pytest_sessionfinish()\n\n dom = minidom.parse(str(path))\n\n properties = dom.getElementsByTagName(\"properties\")\n\n assert properties.length == 1, \"There must be one node\"\n\n property_list = dom.getElementsByTagName(\"property\")\n\n assert property_list.length == 2, \"There most be only 2 property nodes\"\n\n expected = {\"foo\": \"1\", \"bar\": \"2\"}\n actual = {}\n\n for p in property_list:\n k = str(p.getAttribute(\"name\"))\n v = str(p.getAttribute(\"value\"))\n actual[k] = v\n\n assert actual == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_url_property_test_url_property.assert_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_url_property_test_url_property.assert_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1220, "end_line": 1243, "span_ids": ["test_url_property", 
"test_url_property.Report:2", "test_url_property.Report"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_url_property(testdir):\n test_url = \"http://www.github.com/pytest-dev\"\n path = testdir.tmpdir.join(\"test_url_property.xml\")\n log = LogXML(str(path), None)\n\n class Report(BaseReport):\n longrepr = \"FooBarBaz\"\n sections = []\n nodeid = \"something\"\n location = \"tests/filename.py\", 42, \"TestClass.method\"\n url = test_url\n\n test_report = Report()\n\n log.pytest_sessionstart()\n node_reporter = log._opentestcase(test_report)\n node_reporter.append_failure(test_report)\n log.pytest_sessionfinish()\n\n test_case = minidom.parse(str(path)).getElementsByTagName(\"testcase\")[0]\n\n assert (\n test_case.getAttribute(\"url\") == test_url\n ), \"The URL did not get written to the xml\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_set_suite_name_test_set_suite_name.node_assert_attr_name_exp": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_set_suite_name_test_set_suite_name.node_assert_attr_name_exp", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1293, "end_line": 1318, "span_ids": ["test_set_suite_name"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"suite_name\", [\"my_suite\", \"\"])\ndef test_set_suite_name(testdir, suite_name):\n if suite_name:\n testdir.makeini(\n \"\"\"\n [pytest]\n junit_suite_name={}\n \"\"\".format(\n suite_name\n )\n )\n expected = suite_name\n else:\n expected = \"pytest\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n def test_func():\n pass\n \"\"\"\n )\n result, dom = runandparse(testdir)\n assert result.ret == 0\n node = dom.find_first_by_tag(\"testsuite\")\n node.assert_attr(name=expected)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_escaped_skipreason_issue3533_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_junitxml.py_test_escaped_skipreason_issue3533_", "embedding": null, "metadata": {"file_path": "testing/test_junitxml.py", "file_name": "test_junitxml.py", "file_type": "text/x-python", "category": "test", "start_line": 1321, "end_line": 1335, "span_ids": ["test_escaped_skipreason_issue3533"], "tokens": 109}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_escaped_skipreason_issue3533(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip(reason='1 <> 2')\n def test_skip():\n pass\n \"\"\"\n )\n _, dom = runandparse(testdir)\n node = dom.find_first_by_tag(\"testcase\")\n snode = node.find_first_by_tag(\"skipped\")\n assert \"1 <> 2\" in snode.text\n snode.assert_attr(message=\"1 <> 2\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_from___future___import_ab_ignore_markinfo.pytest_mark_filterwarning": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_from___future___import_ab_ignore_markinfo.pytest_mark_filterwarning", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 26, "span_ids": ["impl", "imports:16", "imports:15", "impl:2", "impl:3", "imports"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport six\n\nimport pytest\nfrom _pytest.main import EXIT_INTERRUPTED\nfrom _pytest.mark import EMPTY_PARAMETERSET_OPTION\nfrom _pytest.mark import MarkGenerator as Mark\nfrom _pytest.nodes import Collector\nfrom _pytest.nodes import Node\nfrom _pytest.warning_types import PytestDeprecationWarning\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\ntry:\n import mock\nexcept ImportError:\n import unittest.mock as mock\n\nignore_markinfo = pytest.mark.filterwarnings(\n \"ignore:MarkInfo objects:pytest.RemovedInPytest4Warning\"\n)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestMark_TestMark.test_pytest_mark_name_starts_with_underscore.with_pytest_raises_Attrib.mark__some_name": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestMark_TestMark.test_pytest_mark_name_starts_with_underscore.with_pytest_raises_Attrib.mark__some_name", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 29, "end_line": 56, "span_ids": ["TestMark.test_pytest_mark_name_starts_with_underscore", "TestMark.test_pytest_exists_in_namespace_all", "TestMark", "TestMark.test_mark_with_param.SomeClass:2", "TestMark.test_mark_with_param", "TestMark.test_pytest_mark_notcallable", "TestMark.test_mark_with_param.SomeClass"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestMark(object):\n @pytest.mark.parametrize(\"attr\", [\"mark\", \"param\"])\n @pytest.mark.parametrize(\"modulename\", [\"py.test\", \"pytest\"])\n def test_pytest_exists_in_namespace_all(self, attr, modulename):\n module = sys.modules[modulename]\n assert attr in module.__all__\n\n def test_pytest_mark_notcallable(self):\n mark = Mark()\n pytest.raises((AttributeError, TypeError), mark)\n\n def test_mark_with_param(self):\n def some_function(abc):\n pass\n\n class SomeClass(object):\n pass\n\n assert pytest.mark.fun(some_function) is some_function\n assert pytest.mark.fun.with_args(some_function) is not some_function\n\n assert pytest.mark.fun(SomeClass) is SomeClass\n assert pytest.mark.fun.with_args(SomeClass) is not SomeClass\n\n def test_pytest_mark_name_starts_with_underscore(self):\n mark = Mark()\n with pytest.raises(AttributeError):\n mark._some_name", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marked_class_run_twice_test_marked_class_run_twice.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marked_class_run_twice_test_marked_class_run_twice.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 59, "end_line": 74, "span_ids": ["test_marked_class_run_twice"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_marked_class_run_twice(testdir, request):\n \"\"\"Test fails file is run twice that contains marked class.\n See issue#683.\n \"\"\"\n py_file = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('abc', [1, 2, 3])\n class Test1(object):\n def test_1(self, abc):\n assert abc in [1, 2, 3]\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = testdir.inline_run(file_name, file_name)\n rec.assertoutcome(passed=6)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_ini_markers_test_ini_markers.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_ini_markers_test_ini_markers.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 77, "end_line": 97, "span_ids": ["test_ini_markers"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_ini_markers(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n markers =\n a1: this is a webtest marker\n a2: this is a smoke marker\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_markers(pytestconfig):\n markers = pytestconfig.getini(\"markers\")\n print(markers)\n assert len(markers) >= 2\n assert markers[0].startswith(\"a1:\")\n assert markers[1].startswith(\"a2:\")\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_test_ini_markers_whitespace.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_test_ini_markers_whitespace.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 100, "end_line": 134, "span_ids": ["test_ini_markers_whitespace", "test_markers_option"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_markers_option(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n markers =\n a1: this is a webtest marker\n a1some: another marker\n nodescription\n \"\"\"\n )\n result = testdir.runpytest(\"--markers\")\n result.stdout.fnmatch_lines(\n [\"*a1*this is a webtest*\", \"*a1some*another marker\", \"*nodescription*\"]\n )\n\n\ndef test_ini_markers_whitespace(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n markers =\n a1 : this is a whitespace marker\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.a1\n def test_markers():\n assert True\n \"\"\"\n )\n rec = testdir.inline_run(\"--strict\", \"-m\", \"a1\")\n rec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marker_without_description_test_marker_without_description.rec_assert_outcomes_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_marker_without_description_test_marker_without_description.rec_assert_outcomes_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 154, "span_ids": ["test_marker_without_description"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_marker_without_description(testdir):\n testdir.makefile(\n \".cfg\",\n setup=\"\"\"\n [tool:pytest]\n markers=slow\n \"\"\",\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n pytest.mark.xfail('FAIL')\n \"\"\"\n )\n 
ftdir = testdir.mkdir(\"ft1_dummy\")\n testdir.tmpdir.join(\"conftest.py\").move(ftdir.join(\"conftest.py\"))\n rec = testdir.runpytest(\"--strict\")\n rec.assert_outcomes()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_with_plugin_in_current_dir_test_markers_option_with_plugin_in_current_dir.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_option_with_plugin_in_current_dir_test_markers_option_with_plugin_in_current_dir.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 157, "end_line": 180, "span_ids": ["test_markers_option_with_plugin_in_current_dir"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_markers_option_with_plugin_in_current_dir(testdir):\n testdir.makeconftest('pytest_plugins = \"flip_flop\"')\n testdir.makepyfile(\n flip_flop=\"\"\"\\\n def pytest_configure(config):\n config.addinivalue_line(\"markers\", \"flip:flop\")\n\n def pytest_generate_tests(metafunc):\n try:\n mark = metafunc.function.flipper\n except AttributeError:\n return\n metafunc.parametrize(\"x\", (10, 20))\"\"\"\n )\n testdir.makepyfile(\n \"\"\"\\\n import pytest\n @pytest.mark.flipper\n def test_example(x):\n assert x\"\"\"\n )\n\n result = testdir.runpytest(\"--markers\")\n result.stdout.fnmatch_lines([\"*flip*flop*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_on_pseudo_function_test_strict_prohibits_unregistered_markers.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_on_pseudo_function_test_strict_prohibits_unregistered_markers.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 183, "end_line": 208, "span_ids": ["test_strict_prohibits_unregistered_markers", "test_mark_on_pseudo_function"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mark_on_pseudo_function(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.r(lambda x: 0/0)\n def test_hello():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n\ndef test_strict_prohibits_unregistered_markers(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.unregisteredmark\n def test_hello():\n pass\n \"\"\"\n )\n 
result = testdir.runpytest(\"--strict\")\n assert result.ret != 0\n result.stdout.fnmatch_lines([\"'unregisteredmark' is not a registered marker\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_test_mark_option.assert_list_passed_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_test_mark_option.assert_list_passed_li", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 240, "span_ids": ["test_mark_option"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n (\"xyz\", (\"test_one\",)),\n (\"xyz and xyz2\", ()),\n (\"xyz2\", (\"test_two\",)),\n (\"xyz or xyz2\", (\"test_one\", \"test_two\")),\n ],\n)\ndef test_mark_option(spec, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xyz\n def test_one():\n pass\n @pytest.mark.xyz2\n def test_two():\n pass\n \"\"\"\n )\n opt, passed_result = spec\n rec = testdir.inline_run(\"-m\", opt)\n passed, skipped, fail = rec.listoutcomes()\n passed = [x.nodeid.split(\"::\")[-1] for x in passed]\n assert len(passed) == len(passed_result)\n assert list(passed) == list(passed_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_custom_test_mark_option_custom.assert_list_passed_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_mark_option_custom_test_mark_option_custom.assert_list_passed_li", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 243, "end_line": 269, "span_ids": ["test_mark_option_custom"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\", [(\"interface\", (\"test_interface\",)), (\"not interface\", (\"test_nointer\",))]\n)\ndef test_mark_option_custom(spec, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_collection_modifyitems(items):\n for item in items:\n if \"interface\" in item.nodeid:\n item.add_marker(pytest.mark.interface)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_interface():\n pass\n def test_nointer():\n pass\n \"\"\"\n )\n opt, passed_result = spec\n rec = testdir.inline_run(\"-m\", opt)\n passed, skipped, fail = rec.listoutcomes()\n passed = [x.nodeid.split(\"::\")[-1] for x in passed]\n assert len(passed) == len(passed_result)\n 
assert list(passed) == list(passed_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_custom_test_keyword_option_considers_mark.assert_len_passed_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_custom_test_keyword_option_considers_mark.assert_len_passed_1", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 272, "end_line": 304, "span_ids": ["test_keyword_option_custom", "test_keyword_option_considers_mark"], "tokens": 241}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n (\"interface\", (\"test_interface\",)),\n (\"not interface\", (\"test_nointer\", \"test_pass\")),\n (\"pass\", (\"test_pass\",)),\n (\"not pass\", (\"test_interface\", \"test_nointer\")),\n ],\n)\ndef test_keyword_option_custom(spec, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_interface():\n pass\n def test_nointer():\n pass\n def test_pass():\n pass\n \"\"\"\n )\n opt, passed_result = spec\n rec = testdir.inline_run(\"-k\", opt)\n passed, skipped, fail = rec.listoutcomes()\n passed = [x.nodeid.split(\"::\")[-1] for x in passed]\n assert len(passed) == len(passed_result)\n assert list(passed) == list(passed_result)\n\n\ndef test_keyword_option_considers_mark(testdir):\n testdir.copy_example(\"marks/marks_considered_keywords\")\n rec = testdir.inline_run(\"-k\", \"foo\")\n passed = rec.listoutcomes()[0]\n assert len(passed) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_parametrize_test_keyword_option_parametrize.assert_list_passed_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_parametrize_test_keyword_option_parametrize.assert_list_passed_li", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 307, "end_line": 329, "span_ids": ["test_keyword_option_parametrize"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n (\"None\", (\"test_func[None]\",)),\n (\"1.3\", (\"test_func[1.3]\",)),\n (\"2-3\", (\"test_func[2-3]\",)),\n ],\n)\ndef test_keyword_option_parametrize(spec, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize(\"arg\", [None, 1.3, \"2-3\"])\n def test_func(arg):\n pass\n \"\"\"\n )\n opt, passed_result = spec\n rec = testdir.inline_run(\"-k\", 
opt)\n passed, skipped, fail = rec.listoutcomes()\n passed = [x.nodeid.split(\"::\")[-1] for x in passed]\n assert len(passed) == len(passed_result)\n assert list(passed) == list(passed_result)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_wrong_arguments_test_keyword_option_wrong_arguments.assert_expected_result_in": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_keyword_option_wrong_arguments_test_keyword_option_wrong_arguments.assert_expected_result_in", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 332, "end_line": 352, "span_ids": ["test_keyword_option_wrong_arguments"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"spec\",\n [\n (\n \"foo or import\",\n \"ERROR: Python keyword 'import' not accepted in expressions passed to '-k'\",\n ),\n (\"foo or\", \"ERROR: Wrong expression passed to '-k': foo or\"),\n ],\n)\ndef test_keyword_option_wrong_arguments(spec, testdir, capsys):\n testdir.makepyfile(\n \"\"\"\n def test_func(arg):\n pass\n \"\"\"\n )\n opt, expected_result = spec\n testdir.inline_run(\"-k\", opt)\n out = capsys.readouterr().err\n assert expected_result in out", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collected_from_command_line_test_parametrized_collected_from_command_line.rec_assertoutcome_passed_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collected_from_command_line_test_parametrized_collected_from_command_line.rec_assertoutcome_passed_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 355, "end_line": 369, "span_ids": ["test_parametrized_collected_from_command_line"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parametrized_collected_from_command_line(testdir):\n \"\"\"Parametrized test not collected if test named specified\n in command line issue#649.\n \"\"\"\n py_file = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize(\"arg\", [None, 1.3, \"2-3\"])\n def test_func(arg):\n pass\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n rec = testdir.inline_run(file_name + \"::\" + \"test_func\")\n rec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collect_with_wrong_args_test_parametrized_collect_with_wrong_args.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_collect_with_wrong_args_test_parametrized_collect_with_wrong_args.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 372, "end_line": 392, "span_ids": ["test_parametrized_collect_with_wrong_args"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parametrized_collect_with_wrong_args(testdir):\n \"\"\"Test collect parametrized func with wrong number of args.\"\"\"\n py_file = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize('foo, bar', [(1, 2, 3)])\n def test_func(foo, bar):\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(py_file)\n result.stdout.fnmatch_lines(\n [\n 'test_parametrized_collect_with_wrong_args.py::test_func: in \"parametrize\" the number of names (2):',\n \" ['foo', 'bar']\",\n \"must be equal to the number of values (3):\",\n \" (1, 2, 3)\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_with_kwargs_test_parametrized_with_kwargs.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parametrized_with_kwargs_test_parametrized_with_kwargs.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 395, "end_line": 412, "span_ids": ["test_parametrized_with_kwargs"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parametrized_with_kwargs(testdir):\n \"\"\"Test collect parametrized func with wrong number of args.\"\"\"\n py_file = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture(params=[1,2])\n def a(request):\n return request.param\n\n @pytest.mark.parametrize(argnames='b', argvalues=[1, 2])\n def test_func(a, b):\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(py_file)\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional_TestFunctional.test_merging_markers_deep.for_item_in_items_.assert_x_for_x_in_item_i": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional_TestFunctional.test_merging_markers_deep.for_item_in_items_.assert_x_for_x_in_item_i", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 415, "end_line": 434, "span_ids": ["TestFunctional", "TestFunctional.test_merging_markers_deep"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n def test_merging_markers_deep(self, testdir):\n # issue 199 - propagate markers into nested classes\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n class TestA(object):\n pytestmark = pytest.mark.a\n def test_b(self):\n assert True\n class TestC(object):\n # this one didnt get marked\n def test_d(self):\n assert True\n \"\"\"\n )\n items, rec = testdir.inline_genitems(p)\n for item in items:\n print(item, item.keywords)\n assert [x for x in item.iter_markers() if x.name == \"a\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base.self_assert_markers_items": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base_TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base.self_assert_markers_items", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 436, "end_line": 453, "span_ids": ["TestFunctional.test_mark_decorator_subclass_does_not_propagate_to_base"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.a\n class Base(object): pass\n\n @pytest.mark.b\n class Test1(Base):\n def test_foo(self): pass\n\n class Test2(Base):\n def test_bar(self): pass\n \"\"\"\n )\n items, rec = testdir.inline_genitems(p)\n self.assert_markers(items, test_foo=(\"a\", \"b\"), test_bar=(\"a\",))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_should_not_pass_to_siebling_class_TestFunctional.test_mark_should_not_pass_to_siebling_class.assert_list_sub_item_iter": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_should_not_pass_to_siebling_class_TestFunctional.test_mark_should_not_pass_to_siebling_class.assert_list_sub_item_iter", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 452, "end_line": 478, "span_ids": ["TestFunctional.test_mark_should_not_pass_to_siebling_class"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n @pytest.mark.issue(568)\n def test_mark_should_not_pass_to_siebling_class(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestBase(object):\n def test_foo(self):\n pass\n\n @pytest.mark.b\n class TestSub(TestBase):\n pass\n\n\n class TestOtherSub(TestBase):\n pass\n\n \"\"\"\n )\n items, rec = testdir.inline_genitems(p)\n base_item, sub_item, sub_item_other = items\n print(items, [x.nodeid for x in items])\n # new api seregates\n assert not list(base_item.iter_markers(name=\"b\"))\n assert not list(sub_item_other.iter_markers(name=\"b\"))\n assert list(sub_item.iter_markers(name=\"b\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_baseclasses_merged_TestFunctional.test_mark_decorator_baseclasses_merged.self_assert_markers_items": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_decorator_baseclasses_merged_TestFunctional.test_mark_decorator_baseclasses_merged.self_assert_markers_items", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 483, "end_line": 504, "span_ids": ["TestFunctional.test_mark_decorator_baseclasses_merged"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_mark_decorator_baseclasses_merged(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.a\n class Base(object): pass\n\n @pytest.mark.b\n class Base2(Base): pass\n\n @pytest.mark.c\n class Test1(Base2):\n def test_foo(self): pass\n\n class Test2(Base2):\n @pytest.mark.d\n def test_bar(self): pass\n \"\"\"\n )\n items, rec = testdir.inline_genitems(p)\n self.assert_markers(items, test_foo=(\"a\", \"b\", \"c\"), test_bar=(\"a\", \"b\", \"d\"))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_closest_TestFunctional.test_mark_closest.None_4": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_closest_TestFunctional.test_mark_closest.None_4", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 506, "end_line": 526, "span_ids": ["TestFunctional.test_mark_closest"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_mark_closest(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.c(location=\"class\")\n class Test:\n @pytest.mark.c(location=\"function\")\n def test_has_own():\n pass\n\n def test_has_inherited():\n pass\n\n \"\"\"\n )\n items, rec = testdir.inline_genitems(p)\n has_own, has_inherited = items\n assert has_own.get_closest_marker(\"c\").kwargs == {\"location\": \"function\"}\n assert has_inherited.get_closest_marker(\"c\").kwargs == {\"location\": \"class\"}\n assert has_own.get_closest_marker(\"missing\") is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_with_wrong_marker_TestFunctional.test_mark_dynamically_in_funcarg.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_with_wrong_marker_TestFunctional.test_mark_dynamically_in_funcarg.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 528, "end_line": 561, "span_ids": ["TestFunctional.test_mark_dynamically_in_funcarg", "TestFunctional.test_mark_with_wrong_marker"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_mark_with_wrong_marker(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n import pytest\n class pytestmark(object):\n pass\n def test_func():\n pass\n \"\"\"\n )\n values = reprec.getfailedcollections()\n assert len(values) == 1\n assert \"TypeError\" in str(values[0].longrepr)\n\n def test_mark_dynamically_in_funcarg(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg(request):\n request.applymarker(pytest.mark.hello)\n def pytest_terminal_summary(terminalreporter):\n values = terminalreporter.stats['passed']\n terminalreporter._tw.line(\"keyword: %s\" % values[0].keywords)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_func(arg):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"keyword: *hello*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_no_marker_match_on_unmarked_names_TestFunctional.test_no_marker_match_on_unmarked_names.assert_len_deselected_tes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_no_marker_match_on_unmarked_names_TestFunctional.test_no_marker_match_on_unmarked_names.assert_len_deselected_tes", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 563, "end_line": 580, "span_ids": ["TestFunctional.test_no_marker_match_on_unmarked_names"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_no_marker_match_on_unmarked_names(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.shouldmatch\n def test_marked():\n assert 1\n\n def test_unmarked():\n assert 1\n \"\"\"\n )\n reprec = testdir.inline_run(\"-m\", \"test_unmarked\", p)\n passed, skipped, failed = reprec.listoutcomes()\n assert len(passed) + len(skipped) + len(failed) == 0\n dlist = reprec.getcalls(\"pytest_deselected\")\n deselected_tests = dlist[0].items\n assert len(deselected_tests) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_invalid_m_option_TestFunctional.test_keywords_at_node_level.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_invalid_m_option_TestFunctional.test_keywords_at_node_level.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 582, "end_line": 614, "span_ids": ["TestFunctional.test_invalid_m_option", "TestFunctional.test_keywords_at_node_level"], "tokens": 206}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def test_invalid_m_option(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_a():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-m bogus/\")\n result.stdout.fnmatch_lines(\n [\"INTERNALERROR> Marker expression must be valid Python!\"]\n )\n\n def test_keywords_at_node_level(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope=\"session\", autouse=True)\n def some(request):\n request.keywords[\"hello\"] = 42\n assert \"world\" not in request.keywords\n\n @pytest.fixture(scope=\"function\", autouse=True)\n def funcsetup(request):\n assert \"world\" in request.keywords\n assert \"hello\" in request.keywords\n\n @pytest.mark.world\n def test_function():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n 
reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_keyword_added_for_session_TestFunctional.test_keyword_added_for_session.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_keyword_added_for_session_TestFunctional.test_keyword_added_for_session.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 616, "end_line": 643, "span_ids": ["TestFunctional.test_keyword_added_for_session"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n @ignore_markinfo\n def test_keyword_added_for_session(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_collection_modifyitems(session):\n session.add_marker(\"mark1\")\n session.add_marker(pytest.mark.mark2)\n session.add_marker(pytest.mark.mark3)\n pytest.raises(ValueError, lambda:\n session.add_marker(10))\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_some(request):\n assert \"mark1\" in request.keywords\n assert \"mark2\" in request.keywords\n assert \"mark3\" in request.keywords\n assert 10 not in request.keywords\n marker = request.node.get_closest_marker(\"mark1\")\n assert marker.name == \"mark1\"\n assert marker.args == ()\n assert marker.kwargs == {}\n \"\"\"\n )\n reprec = testdir.inline_run(\"-m\", \"mark1\", SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.assert_markers_TestFunctional.assert_markers.for_name_expected_marker.assert_markers_set_exp": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.assert_markers_TestFunctional.assert_markers.for_name_expected_marker.assert_markers_set_exp", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 645, "end_line": 656, "span_ids": ["TestFunctional.assert_markers"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n def assert_markers(self, items, **expected):\n \"\"\"assert that given items have expected marker names applied to them.\n expected should be a dict of (item name -> seq of expected marker names)\n\n .. 
note:: this could be moved to ``testdir`` if proven to be useful\n to other modules.\n \"\"\"\n\n items = {x.name: x for x in items}\n for name, expected_markers in expected.items():\n markers = {m.name for m in items[name].iter_markers()}\n assert markers == set(expected_markers)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_from_parameters_TestFunctional.test_mark_from_parameters.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestFunctional.test_mark_from_parameters_TestFunctional.test_mark_from_parameters.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 655, "end_line": 678, "span_ids": ["TestFunctional.test_mark_from_parameters"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFunctional(object):\n\n @pytest.mark.issue(1540)\n @pytest.mark.filterwarnings(\"ignore\")\n def test_mark_from_parameters(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytestmark = pytest.mark.skipif(True, reason='skip all')\n\n # skipifs inside fixture params\n params = [pytest.mark.skipif(False, reason='dont skip')('parameter')]\n\n\n @pytest.fixture(params=params)\n def parameter(request):\n return request.param\n\n\n def test_1(parameter):\n assert True\n \"\"\"\n )\n reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG)\n reprec.assertoutcome(skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection_TestKeywordSelection.test_select_simple.check_TestClass_and_test": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection_TestKeywordSelection.test_select_simple.check_TestClass_and_test", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 684, "end_line": 705, "span_ids": ["TestKeywordSelection", "TestKeywordSelection.test_select_simple"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestKeywordSelection(object):\n def test_select_simple(self, testdir):\n file_test = testdir.makepyfile(\n \"\"\"\n def test_one():\n assert 0\n class TestClass(object):\n def test_method_one(self):\n assert 42 == 43\n \"\"\"\n )\n\n def check(keyword, name):\n reprec = testdir.inline_run(\"-s\", \"-k\", keyword, file_test)\n passed, skipped, failed = reprec.listoutcomes()\n assert 
len(failed) == 1\n assert failed[0].nodeid.split(\"::\")[-1] == name\n assert len(reprec.getcalls(\"pytest_deselected\")) == 1\n\n for keyword in [\"test_one\", \"est_on\"]:\n check(keyword, \"test_one\")\n check(\"TestClass and test\", \"test_method_one\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_extra_keywords_TestKeywordSelection.test_select_extra_keywords.assert_dlist_0_items_0_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_extra_keywords_TestKeywordSelection.test_select_extra_keywords.assert_dlist_0_items_0_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 707, "end_line": 746, "span_ids": ["TestKeywordSelection.test_select_extra_keywords"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestKeywordSelection(object):\n\n @pytest.mark.parametrize(\n \"keyword\",\n [\n \"xxx\",\n \"xxx and test_2\",\n \"TestClass\",\n \"xxx and not test_1\",\n \"TestClass and test_2\",\n \"xxx and TestClass and test_2\",\n ],\n )\n def test_select_extra_keywords(self, testdir, keyword):\n p = testdir.makepyfile(\n test_select=\"\"\"\n def test_1():\n pass\n class TestClass(object):\n def test_2(self):\n pass\n \"\"\"\n )\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n @pytest.hookimpl(hookwrapper=True)\n def pytest_pycollect_makeitem(name):\n outcome = yield\n if name == \"TestClass\":\n item = outcome.get_result()\n item.extra_keyword_matches.add(\"xxx\")\n \"\"\"\n )\n reprec = testdir.inline_run(p.dirpath(), \"-s\", \"-k\", keyword)\n print(\"keyword\", repr(keyword))\n passed, skipped, failed = reprec.listoutcomes()\n assert len(passed) == 1\n assert passed[0].nodeid.endswith(\"test_2\")\n dlist = reprec.getcalls(\"pytest_deselected\")\n assert len(dlist) == 1\n assert dlist[0].items[0].name == \"test_1\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_starton_TestKeywordSelection.test_select_starton.assert_item_name_test": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_select_starton_TestKeywordSelection.test_select_starton.assert_item_name_test", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 748, "end_line": 763, "span_ids": ["TestKeywordSelection.test_select_starton"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestKeywordSelection(object):\n\n def test_select_starton(self, testdir):\n threepass = testdir.makepyfile(\n test_threepass=\"\"\"\n def test_one(): assert 1\n def test_two(): assert 1\n def test_three(): assert 1\n \"\"\"\n )\n reprec = testdir.inline_run(\"-k\", \"test_two:\", threepass)\n passed, skipped, failed = reprec.listoutcomes()\n assert len(passed) == 2\n assert not failed\n dlist = reprec.getcalls(\"pytest_deselected\")\n assert len(dlist) == 1\n item = dlist[0].items[0]\n assert item.name == \"test_one\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_keyword_extra_TestKeywordSelection.test_keyword_extra_dash.assert_passed_skipped_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_keyword_extra_TestKeywordSelection.test_keyword_extra_dash.assert_passed_skipped_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 765, "end_line": 790, "span_ids": ["TestKeywordSelection.test_keyword_extra_dash", "TestKeywordSelection.test_keyword_extra"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestKeywordSelection(object):\n\n def test_keyword_extra(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_one():\n assert 0\n test_one.mykeyword = True\n \"\"\"\n )\n reprec = testdir.inline_run(\"-k\", \"mykeyword\", p)\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == 1\n\n @pytest.mark.xfail\n def test_keyword_extra_dash(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_one():\n assert 0\n test_one.mykeyword = True\n \"\"\"\n )\n # with argparse the argument to an option cannot\n # start with '-'\n reprec = testdir.inline_run(\"-k\", \"-mykeyword\", p)\n passed, skipped, failed = reprec.countoutcomes()\n assert passed + skipped + failed == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_no_magic_values_TestMarkDecorator.test__eq__.assert_lhs_rhs_ex": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_TestKeywordSelection.test_no_magic_values_TestMarkDecorator.test__eq__.assert_lhs_rhs_ex", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 792, "end_line": 826, "span_ids": ["TestKeywordSelection.test_no_magic_values", "TestMarkDecorator", "TestMarkDecorator.test__eq__"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestKeywordSelection(object):\n\n def test_no_magic_values(self, testdir):\n \"\"\"Make sure the tests do not match on magic values,\n no double underscored values, like '__dict__',\n and no instance values, like '()'.\n \"\"\"\n p = testdir.makepyfile(\n \"\"\"\n def test_one(): assert 1\n \"\"\"\n )\n\n def assert_test_is_not_selected(keyword):\n reprec = testdir.inline_run(\"-k\", keyword, p)\n passed, skipped, failed = reprec.countoutcomes()\n dlist = reprec.getcalls(\"pytest_deselected\")\n assert passed + skipped + failed == 0\n deselected_tests = dlist[0].items\n assert len(deselected_tests) == 1\n\n assert_test_is_not_selected(\"__\")\n assert_test_is_not_selected(\"()\")\n\n\nclass TestMarkDecorator(object):\n @pytest.mark.parametrize(\n \"lhs, rhs, expected\",\n [\n (pytest.mark.foo(), pytest.mark.foo(), True),\n (pytest.mark.foo(), pytest.mark.bar(), False),\n (pytest.mark.foo(), \"bar\", False),\n (\"foo\", pytest.mark.bar(), False),\n ],\n )\n def test__eq__(self, lhs, rhs, expected):\n assert (lhs == rhs) == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_marks_test_parameterset_for_parametrize_marks.if_mark_xfail_.assert_result_mark_kwargs": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_marks_test_parameterset_for_parametrize_marks.if_mark_xfail_.assert_result_mark_kwargs", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 829, "end_line": 852, "span_ids": ["test_parameterset_for_parametrize_marks"], "tokens": 177}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"mark\", [None, \"\", \"skip\", \"xfail\"])\ndef test_parameterset_for_parametrize_marks(testdir, mark):\n if mark is not None:\n testdir.makeini(\n \"\"\"\n [pytest]\n {}={}\n \"\"\".format(\n EMPTY_PARAMETERSET_OPTION, mark\n )\n )\n\n config = testdir.parseconfig()\n from _pytest.mark import pytest_configure, get_empty_parameterset_mark\n\n pytest_configure(config)\n result_mark = get_empty_parameterset_mark(config, [\"a\"], all)\n if mark in (None, \"\"):\n # normalize to the requested name\n mark = \"skip\"\n assert result_mark.name == mark\n assert result_mark.kwargs[\"reason\"].startswith(\"got empty parameter set \")\n if mark == \"xfail\":\n assert result_mark.kwargs.get(\"run\") is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_fail_at_collect_test_parameterset_for_fail_at_collect.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_fail_at_collect_test_parameterset_for_fail_at_collect.assert_result_ret_EXIT", 
"embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 855, "end_line": 894, "span_ids": ["test_parameterset_for_fail_at_collect"], "tokens": 235}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parameterset_for_fail_at_collect(testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n {}=fail_at_collect\n \"\"\".format(\n EMPTY_PARAMETERSET_OPTION\n )\n )\n\n config = testdir.parseconfig()\n from _pytest.mark import pytest_configure, get_empty_parameterset_mark\n\n pytest_configure(config)\n\n with pytest.raises(\n Collector.CollectError,\n match=r\"Empty parameter set in 'pytest_configure' at line \\d\\d+\",\n ):\n get_empty_parameterset_mark(config, [\"a\"], pytest_configure)\n\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.parametrize(\"empty\", [])\n def test():\n pass\n \"\"\"\n )\n result = testdir.runpytest(str(p1))\n result.stdout.fnmatch_lines(\n [\n \"collected 0 items / 1 errors\",\n \"* ERROR collecting test_parameterset_for_fail_at_collect.py *\",\n \"Empty parameter set in 'test' at line 3\",\n \"*= 1 error in *\",\n ]\n )\n assert result.ret == EXIT_INTERRUPTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_from_parametrize_test_markers_from_parametrize.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_markers_from_parametrize_test_markers_from_parametrize.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 943, "end_line": 979, "span_ids": ["test_markers_from_parametrize"], "tokens": 266}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(\"https://github.com/pytest-dev/pytest/issues/3605\")\n@pytest.mark.filterwarnings(\"ignore\")\ndef test_markers_from_parametrize(testdir):\n testdir.makepyfile(\n \"\"\"\n from __future__ import print_function\n import pytest\n\n first_custom_mark = pytest.mark.custom_marker\n custom_mark = pytest.mark.custom_mark\n @pytest.fixture(autouse=True)\n def trigger(request):\n custom_mark = list(request.node.iter_markers('custom_mark'))\n print(\"Custom mark %s\" % custom_mark)\n\n @custom_mark(\"custom mark non parametrized\")\n def test_custom_mark_non_parametrized():\n print(\"Hey from test\")\n\n @pytest.mark.parametrize(\n \"obj_type\",\n [\n first_custom_mark(\"first custom mark\")(\"template\"),\n pytest.param( # Think this should be recommended way?\n \"disk\",\n marks=custom_mark('custom mark1')\n ),\n custom_mark(\"custom mark2\")(\"vm\"), # Tried also this\n ]\n )\n def test_custom_mark_parametrized(obj_type):\n 
print(\"obj_type is:\", obj_type)\n \"\"\"\n )\n\n result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG)\n result.assert_outcomes(passed=4)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_id_requires_string_test_pytest_param_id_allows_none_or_string.assert_pytest_param_id_s_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_id_requires_string_test_pytest_param_id_allows_none_or_string.assert_pytest_param_id_s_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 985, "end_line": 997, "span_ids": ["test_pytest_param_id_requires_string", "test_pytest_param_id_allows_none_or_string"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_param_id_requires_string():\n with pytest.raises(TypeError) as excinfo:\n pytest.param(id=True)\n msg, = excinfo.value.args\n if six.PY2:\n assert msg == \"Expected id to be a string, got : True\"\n else:\n assert msg == \"Expected id to be a string, got : True\"\n\n\n@pytest.mark.parametrize(\"s\", (None, \"hello world\"))\ndef test_pytest_param_id_allows_none_or_string(s):\n assert pytest.param(id=s)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_warning_on_unknown_kwargs_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_pytest_param_warning_on_unknown_kwargs_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 1000, "end_line": 1010, "span_ids": ["test_pytest_param_warning_on_unknown_kwargs"], "tokens": 108}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_param_warning_on_unknown_kwargs():\n with pytest.warns(PytestDeprecationWarning) as warninfo:\n # typo, should be marks=\n pytest.param(1, 2, mark=pytest.mark.xfail())\n assert warninfo[0].filename == __file__\n msg, = warninfo[0].message.args\n assert msg == (\n \"pytest.param() got unexpected keyword arguments: ['mark'].\\n\"\n \"This will be an error in future versions.\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_modimport.py_subprocess_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_modimport.py_subprocess_", "embedding": 
null, "metadata": {"file_path": "testing/test_modimport.py", "file_name": "test_modimport.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 41, "span_ids": ["imports", "test_fileimport", "impl"], "tokens": 237}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import subprocess\nimport sys\n\nimport py\n\nimport _pytest\nimport pytest\n\npytestmark = pytest.mark.slow\n\nMODSET = [\n x\n for x in py.path.local(_pytest.__file__).dirpath().visit(\"*.py\")\n if x.purebasename != \"__init__\"\n]\n\n\n@pytest.mark.parametrize(\"modfile\", MODSET, ids=lambda x: x.purebasename)\ndef test_fileimport(modfile):\n # this test ensures all internal packages can import\n # without needing the pytest namespace being set\n # this is critical for the initialization of xdist\n\n p = subprocess.Popen(\n [\n sys.executable,\n \"-c\",\n \"import sys, py; py.path.local(sys.argv[1]).pyimport()\",\n modfile.strpath,\n ],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n (out, err) = p.communicate()\n assert p.returncode == 0, \"importing %s failed (exitcode %d): out=%r, err=%r\" % (\n modfile,\n p.returncode,\n out,\n err,\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_from___future___import_ab_test_setattr.assert_A_x_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_from___future___import_ab_test_setattr.assert_A_x_5", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 46, "span_ids": ["test_setattr.A:2", "test_setattr", "test_setattr.A", "imports", "mp"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport re\nimport sys\nimport textwrap\n\nimport six\n\nimport pytest\nfrom _pytest.monkeypatch import MonkeyPatch\n\n\n@pytest.fixture\ndef mp():\n cwd = os.getcwd()\n sys_path = list(sys.path)\n yield MonkeyPatch()\n sys.path[:] = sys_path\n os.chdir(cwd)\n\n\ndef test_setattr():\n class A(object):\n x = 1\n\n monkeypatch = MonkeyPatch()\n pytest.raises(AttributeError, monkeypatch.setattr, A, \"notexists\", 2)\n monkeypatch.setattr(A, \"y\", 2, raising=False)\n assert A.y == 2\n monkeypatch.undo()\n assert not hasattr(A, \"y\")\n\n monkeypatch = MonkeyPatch()\n monkeypatch.setattr(A, \"x\", 2)\n assert A.x == 2\n monkeypatch.setattr(A, \"x\", 3)\n assert A.x == 3\n monkeypatch.undo()\n assert A.x == 1\n\n A.x = 5\n monkeypatch.undo() # double-undo makes no modification\n assert A.x == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestSetattrWithImportPath_TestSetattrWithImportPath.test_delattr.assert_os_path_abspath": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestSetattrWithImportPath_TestSetattrWithImportPath.test_delattr.assert_os_path_abspath", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 87, "span_ids": ["TestSetattrWithImportPath", "TestSetattrWithImportPath.test_string_expression_class", "TestSetattrWithImportPath.test_delattr", "TestSetattrWithImportPath.test_unknown_import", "TestSetattrWithImportPath.test_wrong_target", "TestSetattrWithImportPath.test_unicode_string", "TestSetattrWithImportPath.test_string_expression", "TestSetattrWithImportPath.test_unknown_attr_non_raising", "TestSetattrWithImportPath.test_unknown_attr"], "tokens": 328}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSetattrWithImportPath(object):\n def test_string_expression(self, monkeypatch):\n monkeypatch.setattr(\"os.path.abspath\", lambda x: \"hello2\")\n assert os.path.abspath(\"123\") == \"hello2\"\n\n def test_string_expression_class(self, monkeypatch):\n monkeypatch.setattr(\"_pytest.config.Config\", 42)\n import _pytest\n\n assert _pytest.config.Config == 42\n\n def test_unicode_string(self, monkeypatch):\n monkeypatch.setattr(\"_pytest.config.Config\", 42)\n import _pytest\n\n assert _pytest.config.Config == 42\n monkeypatch.delattr(\"_pytest.config.Config\")\n\n def test_wrong_target(self, monkeypatch):\n pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))\n\n def test_unknown_import(self, monkeypatch):\n pytest.raises(ImportError, lambda: monkeypatch.setattr(\"unkn123.classx\", None))\n\n def test_unknown_attr(self, monkeypatch):\n pytest.raises(\n AttributeError, lambda: monkeypatch.setattr(\"os.path.qweqwe\", None)\n )\n\n def test_unknown_attr_non_raising(self, monkeypatch):\n # https://github.com/pytest-dev/pytest/issues/746\n monkeypatch.setattr(\"os.path.qweqwe\", 42, raising=False)\n assert os.path.qweqwe == 42\n\n def test_delattr(self, monkeypatch):\n monkeypatch.delattr(\"os.path.abspath\")\n assert not hasattr(os.path, \"abspath\")\n monkeypatch.undo()\n assert os.path.abspath", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delattr_test_delattr.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delattr_test_delattr.None_3", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 107, "span_ids": ["test_delattr.A:2", "test_delattr", "test_delattr.A"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delattr():\n class A(object):\n x = 1\n\n monkeypatch = MonkeyPatch()\n monkeypatch.delattr(A, \"x\")\n assert not hasattr(A, \"x\")\n monkeypatch.undo()\n assert A.x == 1\n\n monkeypatch = MonkeyPatch()\n monkeypatch.delattr(A, \"x\")\n pytest.raises(AttributeError, monkeypatch.delattr, A, \"y\")\n monkeypatch.delattr(A, \"y\", raising=False)\n monkeypatch.setattr(A, \"x\", 5, raising=False)\n assert A.x == 5\n monkeypatch.undo()\n assert A.x == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_test_setitem.assert_d_x_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_test_setitem.assert_d_x_5", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 125, "span_ids": ["test_setitem"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem():\n d = {\"x\": 1}\n monkeypatch = MonkeyPatch()\n monkeypatch.setitem(d, \"x\", 2)\n monkeypatch.setitem(d, \"y\", 1700)\n monkeypatch.setitem(d, \"y\", 1700)\n assert d[\"x\"] == 2\n assert d[\"y\"] == 1700\n monkeypatch.setitem(d, \"x\", 3)\n assert d[\"x\"] == 3\n monkeypatch.undo()\n assert d[\"x\"] == 1\n assert \"y\" not in d\n d[\"x\"] = 5\n monkeypatch.undo()\n assert d[\"x\"] == 5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_deleted_meanwhile_test_setenv_deleted_meanwhile.None_1.else_.assert_key_not_in_os_envi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setitem_deleted_meanwhile_test_setenv_deleted_meanwhile.None_1.else_.assert_key_not_in_os_envi", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 128, "end_line": 150, "span_ids": ["test_setitem_deleted_meanwhile", "test_setenv_deleted_meanwhile"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setitem_deleted_meanwhile():\n d = {}\n monkeypatch = MonkeyPatch()\n monkeypatch.setitem(d, \"x\", 2)\n del d[\"x\"]\n monkeypatch.undo()\n assert not d\n\n\n@pytest.mark.parametrize(\"before\", [True, False])\ndef test_setenv_deleted_meanwhile(before):\n key = \"qwpeoip123\"\n 
if before:\n os.environ[key] = \"world\"\n monkeypatch = MonkeyPatch()\n monkeypatch.setenv(key, \"hello\")\n del os.environ[key]\n monkeypatch.undo()\n if before:\n assert os.environ[key] == \"world\"\n del os.environ[key]\n else:\n assert key not in os.environ", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delitem_test_delitem.assert_d_hello_wo": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_delitem_test_delitem.assert_d_hello_wo", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 153, "end_line": 167, "span_ids": ["test_delitem"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_delitem():\n d = {\"x\": 1}\n monkeypatch = MonkeyPatch()\n monkeypatch.delitem(d, \"x\")\n assert \"x\" not in d\n monkeypatch.delitem(d, \"y\", raising=False)\n pytest.raises(KeyError, monkeypatch.delitem, d, \"y\")\n assert not d\n monkeypatch.setitem(d, \"y\", 1700)\n assert d[\"y\"] == 1700\n d[\"hello\"] = \"world\"\n monkeypatch.setitem(d, \"x\", 1500)\n assert d[\"x\"] == 1500\n monkeypatch.undo()\n assert d == {\"hello\": \"world\", \"x\": 1}", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_test_delenv.try_.finally_.if_name_in_os_environ_.del_os_environ_name_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_test_delenv.try_.finally_.if_name_in_os_environ_.del_os_environ_name_", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 170, "end_line": 199, "span_ids": ["test_setenv", "test_delenv"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setenv():\n monkeypatch = MonkeyPatch()\n with pytest.warns(pytest.PytestWarning):\n monkeypatch.setenv(\"XYZ123\", 2)\n import os\n\n assert os.environ[\"XYZ123\"] == \"2\"\n monkeypatch.undo()\n assert \"XYZ123\" not in os.environ\n\n\ndef test_delenv():\n name = \"xyz1234\"\n assert name not in os.environ\n monkeypatch = MonkeyPatch()\n pytest.raises(KeyError, monkeypatch.delenv, name, raising=True)\n monkeypatch.delenv(name, raising=False)\n monkeypatch.undo()\n os.environ[name] = \"1\"\n try:\n monkeypatch = MonkeyPatch()\n monkeypatch.delenv(name)\n assert name not in os.environ\n monkeypatch.setenv(name, \"3\")\n assert os.environ[name] == \"3\"\n monkeypatch.undo()\n assert os.environ[name] 
== \"1\"\n finally:\n if name in os.environ:\n del os.environ[name]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestEnvironWarnings_TestEnvironWarnings.test_setenv_non_str_warning.with_pytest_warns_pytest_.monkeypatch_setenv_str_se": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_TestEnvironWarnings_TestEnvironWarnings.test_setenv_non_str_warning.with_pytest_warns_pytest_.monkeypatch_setenv_str_se", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 202, "end_line": 234, "span_ids": ["TestEnvironWarnings.test_setenv_non_str_warning", "TestEnvironWarnings.test_setenv_unicode_key", "TestEnvironWarnings.test_delenv_unicode_key", "TestEnvironWarnings"], "tokens": 321}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEnvironWarnings(object):\n \"\"\"\n os.environ keys and values should be native strings, otherwise it will cause problems with other modules (notably\n subprocess). On Python 2 os.environ accepts anything without complaining, while Python 3 does the right thing\n and raises an error.\n \"\"\"\n\n VAR_NAME = u\"PYTEST_INTERNAL_MY_VAR\"\n\n @pytest.mark.skipif(six.PY3, reason=\"Python 2 only test\")\n def test_setenv_unicode_key(self, monkeypatch):\n with pytest.warns(\n pytest.PytestWarning,\n match=\"Environment variable name {!r} should be str\".format(self.VAR_NAME),\n ):\n monkeypatch.setenv(self.VAR_NAME, \"2\")\n\n @pytest.mark.skipif(six.PY3, reason=\"Python 2 only test\")\n def test_delenv_unicode_key(self, monkeypatch):\n with pytest.warns(\n pytest.PytestWarning,\n match=\"Environment variable name {!r} should be str\".format(self.VAR_NAME),\n ):\n monkeypatch.delenv(self.VAR_NAME, raising=False)\n\n def test_setenv_non_str_warning(self, monkeypatch):\n value = 2\n msg = (\n \"Value of environment variable PYTEST_INTERNAL_MY_VAR type should be str, \"\n \"but got 2 (type: int); converted to str implicitly\"\n )\n with pytest.warns(pytest.PytestWarning, match=re.escape(msg)):\n monkeypatch.setenv(str(self.VAR_NAME), value)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_prepend_test_setenv_prepend.assert_XYZ123_not_in_os": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_setenv_prepend_test_setenv_prepend.assert_XYZ123_not_in_os", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 237, "end_line": 248, "span_ids": ["test_setenv_prepend"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setenv_prepend():\n import os\n\n monkeypatch = MonkeyPatch()\n with pytest.warns(pytest.PytestWarning):\n monkeypatch.setenv(\"XYZ123\", 2, prepend=\"-\")\n assert os.environ[\"XYZ123\"] == \"2\"\n with pytest.warns(pytest.PytestWarning):\n monkeypatch.setenv(\"XYZ123\", 3, prepend=\"-\")\n assert os.environ[\"XYZ123\"] == \"3-2\"\n monkeypatch.undo()\n assert \"XYZ123\" not in os.environ", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_monkeypatch_plugin_test_issue185_time_breaks.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_monkeypatch_plugin_test_issue185_time_breaks.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 251, "end_line": 326, "span_ids": ["test_chdir_undo", "test_syspath_prepend", "test_syspath_prepend_double_undo", "test_chdir_with_path_local", "test_issue185_time_breaks", "test_chdir_double_undo", "test_chdir_with_str", "test_monkeypatch_plugin"], "tokens": 440}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_monkeypatch_plugin(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_method(monkeypatch):\n assert monkeypatch.__class__.__name__ == \"MonkeyPatch\"\n \"\"\"\n )\n res = reprec.countoutcomes()\n assert tuple(res) == (1, 0, 0), res\n\n\ndef test_syspath_prepend(mp):\n old = list(sys.path)\n mp.syspath_prepend(\"world\")\n mp.syspath_prepend(\"hello\")\n assert sys.path[0] == \"hello\"\n assert sys.path[1] == \"world\"\n mp.undo()\n assert sys.path == old\n mp.undo()\n assert sys.path == old\n\n\ndef test_syspath_prepend_double_undo(mp):\n old_syspath = sys.path[:]\n try:\n mp.syspath_prepend(\"hello world\")\n mp.undo()\n sys.path.append(\"more hello world\")\n mp.undo()\n assert sys.path[-1] == \"more hello world\"\n finally:\n sys.path[:] = old_syspath\n\n\ndef test_chdir_with_path_local(mp, tmpdir):\n mp.chdir(tmpdir)\n assert os.getcwd() == tmpdir.strpath\n\n\ndef test_chdir_with_str(mp, tmpdir):\n mp.chdir(tmpdir.strpath)\n assert os.getcwd() == tmpdir.strpath\n\n\ndef test_chdir_undo(mp, tmpdir):\n cwd = os.getcwd()\n mp.chdir(tmpdir)\n mp.undo()\n assert os.getcwd() == cwd\n\n\ndef test_chdir_double_undo(mp, tmpdir):\n mp.chdir(tmpdir.strpath)\n mp.undo()\n tmpdir.chdir()\n mp.undo()\n assert os.getcwd() == tmpdir.strpath\n\n\ndef test_issue185_time_breaks(testdir):\n testdir.makepyfile(\n \"\"\"\n import time\n def test_m(monkeypatch):\n def f():\n raise Exception\n monkeypatch.setattr(time, \"time\", f)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *1 passed*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_importerror_test_importerror.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_importerror_test_importerror.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 329, "end_line": 355, "span_ids": ["test_importerror"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_importerror(testdir):\n p = testdir.mkpydir(\"package\")\n p.join(\"a.py\").write(\n textwrap.dedent(\n \"\"\"\\\n import doesnotexist\n\n x = 1\n \"\"\"\n )\n )\n testdir.tmpdir.join(\"test_importerror.py\").write(\n textwrap.dedent(\n \"\"\"\\\n def test_importerror(monkeypatch):\n monkeypatch.setattr('package.a.x', 2)\n \"\"\"\n )\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *import error in package.a: No module named {0}doesnotexist{0}*\n \"\"\".format(\n \"'\" if sys.version_info > (3, 0) else \"\"\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_SampleNew_test_issue156_undo_staticmethod.assert_Sample_hello_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_SampleNew_test_issue156_undo_staticmethod.assert_Sample_hello_", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 358, "end_line": 391, "span_ids": ["SampleOld.hello", "SampleNewInherit", "test_issue156_undo_staticmethod", "SampleNew.hello", "SampleOld", "SampleNew", "SampleOldInherit"], "tokens": 160}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SampleNew(object):\n @staticmethod\n def hello():\n return True\n\n\nclass SampleNewInherit(SampleNew):\n pass\n\n\nclass SampleOld(object):\n # oldstyle on python2\n @staticmethod\n def hello():\n return True\n\n\nclass SampleOldInherit(SampleOld):\n pass\n\n\n@pytest.mark.parametrize(\n \"Sample\",\n [SampleNew, SampleNewInherit, SampleOld, SampleOldInherit],\n ids=[\"new\", \"new-inherit\", \"old\", \"old-inherit\"],\n)\ndef test_issue156_undo_staticmethod(Sample):\n monkeypatch = MonkeyPatch()\n\n monkeypatch.setattr(Sample, \"hello\", None)\n assert Sample.hello is None\n\n monkeypatch.undo()\n assert Sample.hello()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_undo_class_descriptors_delattr_test_context.assert_inspect_isclass_fu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_undo_class_descriptors_delattr_test_context.assert_inspect_isclass_fu", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 394, "end_line": 439, "span_ids": ["test_undo_class_descriptors_delattr.SampleParent", "test_undo_class_descriptors_delattr.SampleChild:2", "test_undo_class_descriptors_delattr.SampleParent.hello", "test_undo_class_descriptors_delattr", "test_undo_class_descriptors_delattr.SampleChild", "test_context", "test_issue1338_name_resolving"], "tokens": 261}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_undo_class_descriptors_delattr():\n class SampleParent(object):\n @classmethod\n def hello(_cls):\n pass\n\n @staticmethod\n def world():\n pass\n\n class SampleChild(SampleParent):\n pass\n\n monkeypatch = MonkeyPatch()\n\n original_hello = SampleChild.hello\n original_world = SampleChild.world\n monkeypatch.delattr(SampleParent, \"hello\")\n monkeypatch.delattr(SampleParent, \"world\")\n assert getattr(SampleParent, \"hello\", None) is None\n assert getattr(SampleParent, \"world\", None) is None\n\n monkeypatch.undo()\n assert original_hello == SampleChild.hello\n assert original_world == SampleChild.world\n\n\ndef test_issue1338_name_resolving():\n pytest.importorskip(\"requests\")\n monkeypatch = MonkeyPatch()\n try:\n monkeypatch.delattr(\"requests.sessions.Session.request\")\n finally:\n monkeypatch.undo()\n\n\ndef test_context():\n monkeypatch = MonkeyPatch()\n\n import functools\n import inspect\n\n with monkeypatch.context() as m:\n m.setattr(functools, \"partial\", 3)\n assert not inspect.isclass(functools.partial)\n assert inspect.isclass(functools.partial)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_syspath_prepend_with_namespace_packages_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_monkeypatch.py_test_syspath_prepend_with_namespace_packages_", "embedding": null, "metadata": {"file_path": "testing/test_monkeypatch.py", "file_name": "test_monkeypatch.py", "file_type": "text/x-python", "category": "test", "start_line": 442, "end_line": 472, "span_ids": ["test_syspath_prepend_with_namespace_packages"], "tokens": 250}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_syspath_prepend_with_namespace_packages(testdir, monkeypatch):\n for dirname in \"hello\", \"world\":\n d = testdir.mkdir(dirname)\n ns = d.mkdir(\"ns_pkg\")\n ns.join(\"__init__.py\").write(\n 
\"__import__('pkg_resources').declare_namespace(__name__)\"\n )\n lib = ns.mkdir(dirname)\n lib.join(\"__init__.py\").write(\"def check(): return %r\" % dirname)\n\n monkeypatch.syspath_prepend(\"hello\")\n import ns_pkg.hello\n\n assert ns_pkg.hello.check() == \"hello\"\n\n with pytest.raises(ImportError):\n import ns_pkg.world\n\n # Prepending should call fixup_namespace_packages.\n monkeypatch.syspath_prepend(\"world\")\n import ns_pkg.world\n\n assert ns_pkg.world.check() == \"world\"\n\n # Should invalidate caches via importlib.invalidate_caches.\n tmpdir = testdir.tmpdir\n modules_tmpdir = tmpdir.mkdir(\"modules_tmpdir\")\n monkeypatch.syspath_prepend(str(modules_tmpdir))\n modules_tmpdir.join(\"main_app.py\").write(\"app = True\")\n from main_app import app # noqa: F401", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_py_test_ischildnode.assert_result_is_expected": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_py_test_ischildnode.assert_result_is_expected", "embedding": null, "metadata": {"file_path": "testing/test_nodes.py", "file_name": "test_nodes.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 22, "span_ids": ["test_ischildnode", "imports"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import py\n\nimport pytest\nfrom _pytest import nodes\n\n\n@pytest.mark.parametrize(\n \"baseid, nodeid, expected\",\n (\n (\"\", \"\", True),\n (\"\", \"foo\", True),\n (\"\", \"foo/bar\", True),\n (\"\", \"foo/bar::TestBaz\", True),\n (\"foo\", \"food\", False),\n (\"foo/bar::TestBaz\", \"foo/bar\", False),\n (\"foo/bar::TestBaz\", \"foo/bar::TestBop\", False),\n (\"foo/bar\", \"foo/bar::TestBop\", True),\n ),\n)\ndef test_ischildnode(baseid, nodeid, expected):\n result = nodes.ischildnode(baseid, nodeid)\n assert result is expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_test_std_warn_not_pytestwarning_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nodes.py_test_std_warn_not_pytestwarning_", "embedding": null, "metadata": {"file_path": "testing/test_nodes.py", "file_name": "test_nodes.py", "file_type": "text/x-python", "category": "test", "start_line": 25, "end_line": 54, "span_ids": ["test__check_initialpaths_for_relpath.FakeSession:2", "test__check_initialpaths_for_relpath.FakeSession", "test__check_initialpaths_for_relpath.FakeSession_1", "test__check_initialpaths_for_relpath.FakeSession_1:2", "test_std_warn_not_pytestwarning", "test__check_initialpaths_for_relpath"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "def test_std_warn_not_pytestwarning(testdir):\n items = testdir.getitems(\n \"\"\"\n def test():\n pass\n \"\"\"\n )\n with pytest.raises(ValueError, match=\".*instance of PytestWarning.*\"):\n items[0].warn(UserWarning(\"some warning\"))\n\n\ndef test__check_initialpaths_for_relpath():\n \"\"\"Ensure that it handles dirs, and does not always use dirname.\"\"\"\n cwd = py.path.local()\n\n class FakeSession:\n _initialpaths = [cwd]\n\n assert nodes._check_initialpaths_for_relpath(FakeSession, cwd) == \"\"\n\n sub = cwd.join(\"file\")\n\n class FakeSession:\n _initialpaths = [cwd]\n\n assert nodes._check_initialpaths_for_relpath(FakeSession, sub) == \"file\"\n\n outside = py.path.local(\"/outside\")\n assert nodes._check_initialpaths_for_relpath(FakeSession, outside) is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py__encoding_utf_8_test_nose_setup.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py__encoding_utf_8_test_nose_setup.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 31, "span_ids": ["test_nose_setup", "imports", "setup_module", "docstring"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# encoding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\n\n\ndef setup_module(mod):\n mod.nose = pytest.importorskip(\"nose\")\n\n\ndef test_nose_setup(testdir):\n p = testdir.makepyfile(\n \"\"\"\n values = []\n from nose.tools import with_setup\n\n @with_setup(lambda: values.append(1), lambda: values.append(2))\n def test_hello():\n assert values == [1]\n\n def test_world():\n assert values == [1,2]\n\n test_hello.setup = lambda: values.append(1)\n test_hello.teardown = lambda: values.append(2)\n \"\"\"\n )\n result = testdir.runpytest(p, \"-p\", \"nose\")\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_func_with_setup_decorator_test_nose_setup_func.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_func_with_setup_decorator_test_nose_setup_func.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 34, "end_line": 84, "span_ids": ["test_setup_func_not_callable.A:2", "test_setup_func_with_setup_decorator.A.f", "test_nose_setup_func", "test_setup_func_with_setup_decorator.A", "test_setup_func_not_callable", "test_setup_func_not_callable.A", "test_setup_func_with_setup_decorator"], "tokens": 236}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_func_with_setup_decorator():\n from _pytest.nose import call_optional\n\n values = []\n\n class A(object):\n @pytest.fixture(autouse=True)\n def f(self):\n values.append(1)\n\n call_optional(A(), \"f\")\n assert not values\n\n\ndef test_setup_func_not_callable():\n from _pytest.nose import call_optional\n\n class A(object):\n f = 1\n\n call_optional(A(), \"f\")\n\n\ndef test_nose_setup_func(testdir):\n p = testdir.makepyfile(\n \"\"\"\n from nose.tools import with_setup\n\n values = []\n\n def my_setup():\n a = 1\n values.append(a)\n\n def my_teardown():\n b = 2\n values.append(b)\n\n @with_setup(my_setup, my_teardown)\n def test_hello():\n print(values)\n assert values == [1]\n\n def test_world():\n print(values)\n assert values == [1,2]\n\n \"\"\"\n )\n result = testdir.runpytest(p, \"-p\", \"nose\")\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_func_failure_test_nose_setup_func_failure_2.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_func_failure_test_nose_setup_func_failure_2.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 87, "end_line": 127, "span_ids": ["test_nose_setup_func_failure", "test_nose_setup_func_failure_2"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nose_setup_func_failure(testdir):\n p = testdir.makepyfile(\n \"\"\"\n from nose.tools import with_setup\n\n values = []\n my_setup = lambda x: 1\n my_teardown = lambda x: 2\n\n @with_setup(my_setup, my_teardown)\n def test_hello():\n print(values)\n assert values == [1]\n\n def test_world():\n print(values)\n assert values == [1,2]\n\n \"\"\"\n )\n result = testdir.runpytest(p, \"-p\", \"nose\")\n result.stdout.fnmatch_lines([\"*TypeError: ()*\"])\n\n\ndef test_nose_setup_func_failure_2(testdir):\n testdir.makepyfile(\n \"\"\"\n values = []\n\n my_setup = 1\n my_teardown = 2\n\n def test_hello():\n assert values == []\n\n test_hello.setup = my_setup\n test_hello.teardown = my_teardown\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_partial_test_nose_setup_partial.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_setup_partial_test_nose_setup_partial.result_stdout_fnmatch_lin", "embedding": null, 
"metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 130, "end_line": 162, "span_ids": ["test_nose_setup_partial"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nose_setup_partial(testdir):\n pytest.importorskip(\"functools\")\n p = testdir.makepyfile(\n \"\"\"\n from functools import partial\n\n values = []\n\n def my_setup(x):\n a = x\n values.append(a)\n\n def my_teardown(x):\n b = x\n values.append(b)\n\n my_setup_partial = partial(my_setup, 1)\n my_teardown_partial = partial(my_teardown, 2)\n\n def test_hello():\n print(values)\n assert values == [1]\n\n def test_world():\n print(values)\n assert values == [1,2]\n\n test_hello.setup = my_setup_partial\n test_hello.teardown = my_teardown_partial\n \"\"\"\n )\n result = testdir.runpytest(p, \"-p\", \"nose\")\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_module_level_setup_test_module_level_setup.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_module_level_setup_test_module_level_setup.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 165, "end_line": 193, "span_ids": ["test_module_level_setup"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_module_level_setup(testdir):\n testdir.makepyfile(\n \"\"\"\n from nose.tools import with_setup\n items = {}\n\n def setup():\n items[1]=1\n\n def teardown():\n del items[1]\n\n def setup2():\n items[2] = 2\n\n def teardown2():\n del items[2]\n\n def test_setup_module_setup():\n assert items[1] == 1\n\n @with_setup(setup2, teardown2)\n def test_local_setup():\n assert items[2] == 2\n assert 1 not in items\n \"\"\"\n )\n result = testdir.runpytest(\"-p\", \"nose\")\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_style_setup_teardown_test_nose_setup_ordering.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_nose_style_setup_teardown_test_nose_setup_ordering.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 232, "span_ids": ["test_nose_style_setup_teardown", "test_nose_setup_ordering"], 
"tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_nose_style_setup_teardown(testdir):\n testdir.makepyfile(\n \"\"\"\n values = []\n\n def setup_module():\n values.append(1)\n\n def teardown_module():\n del values[0]\n\n def test_hello():\n assert values == [1]\n\n def test_world():\n assert values == [1]\n \"\"\"\n )\n result = testdir.runpytest(\"-p\", \"nose\")\n result.stdout.fnmatch_lines([\"*2 passed*\"])\n\n\ndef test_nose_setup_ordering(testdir):\n testdir.makepyfile(\n \"\"\"\n def setup_module(mod):\n mod.visited = True\n\n class TestClass(object):\n def setup(self):\n assert visited\n def test_first(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_apiwrapper_problem_issue260_test_apiwrapper_problem_issue260.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_apiwrapper_problem_issue260_test_apiwrapper_problem_issue260.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 235, "end_line": 257, "span_ids": ["test_apiwrapper_problem_issue260"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_apiwrapper_problem_issue260(testdir):\n # this would end up trying a call an optional teardown on the class\n # for plain unittests we dont want nose behaviour\n testdir.makepyfile(\n \"\"\"\n import unittest\n class TestCase(unittest.TestCase):\n def setup(self):\n #should not be called in unittest testcases\n assert 0, 'setup'\n def teardown(self):\n #should not be called in unittest testcases\n assert 0, 'teardown'\n def setUp(self):\n print('setup')\n def tearDown(self):\n print('teardown')\n def test_fun(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.assert_outcomes(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_teardown_linking_issue265_test_setup_teardown_linking_issue265.reprec_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_setup_teardown_linking_issue265_test_setup_teardown_linking_issue265.reprec_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 260, "end_line": 285, "span_ids": ["test_setup_teardown_linking_issue265"], "tokens": 193}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_teardown_linking_issue265(testdir):\n # we accidentally didnt integrate nose setupstate with normal setupstate\n # this test ensures that won't happen again\n testdir.makepyfile(\n '''\n import pytest\n\n class TestGeneric(object):\n def test_nothing(self):\n \"\"\"Tests the API of the implementation (for generic and specialized).\"\"\"\n\n @pytest.mark.skipif(\"True\", reason=\n \"Skip tests to check if teardown is skipped as well.\")\n class TestSkipTeardown(TestGeneric):\n\n def setup(self):\n \"\"\"Sets up my specialized implementation for $COOL_PLATFORM.\"\"\"\n raise Exception(\"should not call setup for skipped tests\")\n\n def teardown(self):\n \"\"\"Undoes the setup.\"\"\"\n raise Exception(\"should not call teardown for skipped tests\")\n '''\n )\n reprec = testdir.runpytest()\n reprec.assert_outcomes(passed=1, skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_SkipTest_during_collection_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_nose.py_test_SkipTest_during_collection_", "embedding": null, "metadata": {"file_path": "testing/test_nose.py", "file_name": "test_nose.py", "file_type": "text/x-python", "category": "test", "start_line": 288, "end_line": 384, "span_ids": ["test_skip_test_with_unicode", "test_SkipTest_during_collection", "test_istest_function_decorator", "test_nottest_class_decorator", "test_SkipTest_in_test", "test_istest_class_decorator", "test_nottest_function_decorator"], "tokens": 517}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_SkipTest_during_collection(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import nose\n raise nose.SkipTest(\"during collection\")\n def test_failing():\n assert False\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.assert_outcomes(skipped=1)\n\n\ndef test_SkipTest_in_test(testdir):\n testdir.makepyfile(\n \"\"\"\n import nose\n\n def test_skipping():\n raise nose.SkipTest(\"in test\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(skipped=1)\n\n\ndef test_istest_function_decorator(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import nose.tools\n @nose.tools.istest\n def not_test_prefix():\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.assert_outcomes(passed=1)\n\n\ndef test_nottest_function_decorator(testdir):\n testdir.makepyfile(\n \"\"\"\n import nose.tools\n @nose.tools.nottest\n def test_prefix():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n calls = reprec.getreports(\"pytest_runtest_logreport\")\n assert not calls\n\n\ndef test_istest_class_decorator(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import nose.tools\n @nose.tools.istest\n class NotTestPrefix(object):\n def 
test_method(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.assert_outcomes(passed=1)\n\n\ndef test_nottest_class_decorator(testdir):\n testdir.makepyfile(\n \"\"\"\n import nose.tools\n @nose.tools.nottest\n class TestPrefix(object):\n def test_method(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n assert not reprec.getfailedcollections()\n calls = reprec.getreports(\"pytest_runtest_logreport\")\n assert not calls\n\n\ndef test_skip_test_with_unicode(testdir):\n testdir.makepyfile(\n \"\"\"\n # encoding: utf-8\n import unittest\n class TestClass():\n def test_io(self):\n raise unittest.SkipTest(u'\ud83d\ude0a')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* 1 skipped *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_from___future___import_ab_TestParser.test_argument.assert_str_argument_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_from___future___import_ab_TestParser.test_argument.assert_str_argument_", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["TestParser", "TestParser.test_custom_prog", "parser", "TestParser.test_no_help_by_default", "TestParser.test_argument", "imports"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport distutils.spawn\nimport os\nimport sys\n\nimport py\n\nimport pytest\nfrom _pytest.config import argparsing as parseopt\nfrom _pytest.config.exceptions import UsageError\n\n\n@pytest.fixture\ndef parser():\n return parseopt.Parser()\n\n\nclass TestParser(object):\n def test_no_help_by_default(self):\n parser = parseopt.Parser(usage=\"xyz\")\n pytest.raises(UsageError, lambda: parser.parse([\"-h\"]))\n\n def test_custom_prog(self, parser):\n \"\"\"Custom prog can be set for `argparse.ArgumentParser`.\"\"\"\n assert parser._getparser().prog == os.path.basename(sys.argv[0])\n parser.prog = \"custom-prog\"\n assert parser._getparser().prog == \"custom-prog\"\n\n def test_argument(self):\n with pytest.raises(parseopt.ArgumentError):\n # need a short or long option\n argument = parseopt.Argument()\n argument = parseopt.Argument(\"-t\")\n assert argument._short_opts == [\"-t\"]\n assert argument._long_opts == []\n assert argument.dest == \"t\"\n argument = parseopt.Argument(\"-t\", \"--test\")\n assert argument._short_opts == [\"-t\"]\n assert argument._long_opts == [\"--test\"]\n assert argument.dest == \"test\"\n argument = parseopt.Argument(\"-t\", \"--test\", dest=\"abc\")\n assert argument.dest == \"abc\"\n assert str(argument) == (\n \"Argument(_short_opts: ['-t'], _long_opts: ['--test'], dest: 'abc')\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_type_TestParser.test_argument_type.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_type_TestParser.test_argument_type.None_3", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 64, "span_ids": ["TestParser.test_argument_type"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_argument_type(self):\n argument = parseopt.Argument(\"-t\", dest=\"abc\", type=int)\n assert argument.type is int\n argument = parseopt.Argument(\"-t\", dest=\"abc\", type=str)\n assert argument.type is str\n argument = parseopt.Argument(\"-t\", dest=\"abc\", type=float)\n assert argument.type is float\n with pytest.warns(DeprecationWarning):\n with pytest.raises(KeyError):\n argument = parseopt.Argument(\"-t\", dest=\"abc\", type=\"choice\")\n argument = parseopt.Argument(\n \"-t\", dest=\"abc\", type=str, choices=[\"red\", \"blue\"]\n )\n assert argument.type is str", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_processopt_TestParser.test_parse_will_set_default.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_argument_processopt_TestParser.test_parse_will_set_default.None_2", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 66, "end_line": 153, "span_ids": ["TestParser.test_group_shortopt_lowercase", "TestParser.test_argument_processopt", "TestParser.test_group_add_and_get", "TestParser.test_parse_known_and_unknown_args", "TestParser.test_parser_addoption", "TestParser.test_getgroup_simple", "TestParser.test_parse2", "TestParser.test_group_addoption", "TestParser.test_parse", "TestParser.test_group_ordering", "TestParser.test_parse_will_set_default", "TestParser.test_parse_known_args", "TestParser.test_group_addoption_conflict"], "tokens": 786}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_argument_processopt(self):\n argument = parseopt.Argument(\"-t\", type=int)\n argument.default = 42\n argument.dest = \"abc\"\n res = argument.attrs()\n assert res[\"default\"] == 42\n assert res[\"dest\"] == \"abc\"\n\n def test_group_add_and_get(self, parser):\n group = parser.getgroup(\"hello\", description=\"desc\")\n assert group.name == \"hello\"\n assert group.description == \"desc\"\n\n def test_getgroup_simple(self, parser):\n group = 
parser.getgroup(\"hello\", description=\"desc\")\n assert group.name == \"hello\"\n assert group.description == \"desc\"\n group2 = parser.getgroup(\"hello\")\n assert group2 is group\n\n def test_group_ordering(self, parser):\n parser.getgroup(\"1\")\n parser.getgroup(\"2\")\n parser.getgroup(\"3\", after=\"1\")\n groups = parser._groups\n groups_names = [x.name for x in groups]\n assert groups_names == list(\"132\")\n\n def test_group_addoption(self):\n group = parseopt.OptionGroup(\"hello\")\n group.addoption(\"--option1\", action=\"store_true\")\n assert len(group.options) == 1\n assert isinstance(group.options[0], parseopt.Argument)\n\n def test_group_addoption_conflict(self):\n group = parseopt.OptionGroup(\"hello again\")\n group.addoption(\"--option1\", \"--option-1\", action=\"store_true\")\n with pytest.raises(ValueError) as err:\n group.addoption(\"--option1\", \"--option-one\", action=\"store_true\")\n assert str({\"--option1\"}) in str(err.value)\n\n def test_group_shortopt_lowercase(self, parser):\n group = parser.getgroup(\"hello\")\n with pytest.raises(ValueError):\n group.addoption(\"-x\", action=\"store_true\")\n assert len(group.options) == 0\n group._addoption(\"-x\", action=\"store_true\")\n assert len(group.options) == 1\n\n def test_parser_addoption(self, parser):\n group = parser.getgroup(\"custom options\")\n assert len(group.options) == 0\n group.addoption(\"--option1\", action=\"store_true\")\n assert len(group.options) == 1\n\n def test_parse(self, parser):\n parser.addoption(\"--hello\", dest=\"hello\", action=\"store\")\n args = parser.parse([\"--hello\", \"world\"])\n assert args.hello == \"world\"\n assert not getattr(args, parseopt.FILE_OR_DIR)\n\n def test_parse2(self, parser):\n args = parser.parse([py.path.local()])\n assert getattr(args, parseopt.FILE_OR_DIR)[0] == py.path.local()\n\n def test_parse_known_args(self, parser):\n parser.parse_known_args([py.path.local()])\n parser.addoption(\"--hello\", action=\"store_true\")\n ns = parser.parse_known_args([\"x\", \"--y\", \"--hello\", \"this\"])\n assert ns.hello\n assert ns.file_or_dir == [\"x\"]\n\n def test_parse_known_and_unknown_args(self, parser):\n parser.addoption(\"--hello\", action=\"store_true\")\n ns, unknown = parser.parse_known_and_unknown_args(\n [\"x\", \"--y\", \"--hello\", \"this\"]\n )\n assert ns.hello\n assert ns.file_or_dir == [\"x\"]\n assert unknown == [\"--y\", \"this\"]\n\n def test_parse_will_set_default(self, parser):\n parser.addoption(\"--hello\", dest=\"hello\", default=\"x\", action=\"store\")\n option = parser.parse([])\n assert option.hello == \"x\"\n del option.hello\n parser.parse_setoption([], option)\n assert option.hello == \"x\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_setoption_TestParser.test_parse_special_destination.assert_args_ultimate_answ": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_setoption_TestParser.test_parse_special_destination.assert_args_ultimate_answ", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 171, "span_ids": ["TestParser.test_parse_setoption.A:2", "TestParser.test_parse_special_destination", 
"TestParser.test_parse_setoption", "TestParser.test_parse_setoption.A"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_parse_setoption(self, parser):\n parser.addoption(\"--hello\", dest=\"hello\", action=\"store\")\n parser.addoption(\"--world\", dest=\"world\", default=42)\n\n class A(object):\n pass\n\n option = A()\n args = parser.parse_setoption([\"--hello\", \"world\"], option)\n assert option.hello == \"world\"\n assert option.world == 42\n assert not args\n\n def test_parse_special_destination(self, parser):\n parser.addoption(\"--ultimate-answer\", type=int)\n args = parser.parse([\"--ultimate-answer\", \"42\"])\n assert args.ultimate_answer == 42", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_split_positional_arguments_TestParser.test_parse_split_positional_arguments.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_split_positional_arguments_TestParser.test_parse_split_positional_arguments.None_6", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 185, "span_ids": ["TestParser.test_parse_split_positional_arguments"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_parse_split_positional_arguments(self, parser):\n parser.addoption(\"-R\", action=\"store_true\")\n parser.addoption(\"-S\", action=\"store_false\")\n args = parser.parse([\"-R\", \"4\", \"2\", \"-S\"])\n assert getattr(args, parseopt.FILE_OR_DIR) == [\"4\", \"2\"]\n args = parser.parse([\"-R\", \"-S\", \"4\", \"2\", \"-R\"])\n assert getattr(args, parseopt.FILE_OR_DIR) == [\"4\", \"2\"]\n assert args.R is True\n assert args.S is False\n args = parser.parse([\"-R\", \"4\", \"-S\", \"2\"])\n assert getattr(args, parseopt.FILE_OR_DIR) == [\"4\", \"2\"]\n assert args.R is True\n assert args.S is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_defaultgetter_TestParser.test_parse_defaultgetter.assert_option_no_is_False": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_parse_defaultgetter_TestParser.test_parse_defaultgetter.assert_option_no_is_False", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 187, "end_line": 203, "span_ids": 
["TestParser.test_parse_defaultgetter"], "tokens": 151}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_parse_defaultgetter(self):\n def defaultget(option):\n if not hasattr(option, \"type\"):\n return\n if option.type is int:\n option.default = 42\n elif option.type is str:\n option.default = \"world\"\n\n parser = parseopt.Parser(processopt=defaultget)\n parser.addoption(\"--this\", dest=\"this\", type=int, action=\"store\")\n parser.addoption(\"--hello\", dest=\"hello\", type=str, action=\"store\")\n parser.addoption(\"--no\", dest=\"no\", action=\"store_true\")\n option = parser.parse([])\n assert option.hello == \"world\"\n assert option.this == 42\n assert option.no is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_helper_TestParser.test_drop_short_helper.assert_join_args_file": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_helper_TestParser.test_drop_short_helper.assert_join_args_file", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 240, "span_ids": ["TestParser.test_drop_short_helper"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_drop_short_helper(self):\n parser = argparse.ArgumentParser(\n formatter_class=parseopt.DropShorterLongHelpFormatter\n )\n parser.add_argument(\n \"-t\", \"--twoword\", \"--duo\", \"--two-word\", \"--two\", help=\"foo\"\n ).map_long_option = {\"two\": \"two-word\"}\n # throws error on --deux only!\n parser.add_argument(\n \"-d\", \"--deuxmots\", \"--deux-mots\", action=\"store_true\", help=\"foo\"\n ).map_long_option = {\"deux\": \"deux-mots\"}\n parser.add_argument(\"-s\", action=\"store_true\", help=\"single short\")\n parser.add_argument(\"--abc\", \"-a\", action=\"store_true\", help=\"bar\")\n parser.add_argument(\"--klm\", \"-k\", \"--kl-m\", action=\"store_true\", help=\"bar\")\n parser.add_argument(\n \"-P\", \"--pq-r\", \"-p\", \"--pqr\", action=\"store_true\", help=\"bar\"\n )\n parser.add_argument(\n \"--zwei-wort\", \"--zweiwort\", \"--zweiwort\", action=\"store_true\", help=\"bar\"\n )\n parser.add_argument(\n \"-x\", \"--exit-on-first\", \"--exitfirst\", action=\"store_true\", help=\"spam\"\n ).map_long_option = {\"exitfirst\": \"exit-on-first\"}\n parser.add_argument(\"files_and_dirs\", nargs=\"*\")\n args = parser.parse_args([\"-k\", \"--duo\", \"hallo\", \"--exitfirst\"])\n assert args.twoword == \"hallo\"\n assert args.klm is True\n assert args.zwei_wort is False\n assert args.exit_on_first is True\n assert args.s is False\n args = 
parser.parse_args([\"--deux-mots\"])\n with pytest.raises(AttributeError):\n assert args.deux_mots is True\n assert args.deuxmots is True\n args = parser.parse_args([\"file\", \"dir\"])\n assert \"|\".join(args.files_and_dirs) == \"file|dir\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_0_TestParser._testing_would_be_more_h": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_0_TestParser._testing_would_be_more_h", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 242, "end_line": 268, "span_ids": ["TestParser.test_drop_short_2", "TestParser.test_drop_short_3", "TestParser.test_drop_short_0", "TestParser.test_drop_short_help0"], "tokens": 288}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_drop_short_0(self, parser):\n parser.addoption(\"--funcarg\", \"--func-arg\", action=\"store_true\")\n parser.addoption(\"--abc-def\", \"--abc-def\", action=\"store_true\")\n parser.addoption(\"--klm-hij\", action=\"store_true\")\n args = parser.parse([\"--funcarg\", \"--k\"])\n assert args.funcarg is True\n assert args.abc_def is False\n assert args.klm_hij is True\n\n def test_drop_short_2(self, parser):\n parser.addoption(\"--func-arg\", \"--doit\", action=\"store_true\")\n args = parser.parse([\"--doit\"])\n assert args.func_arg is True\n\n def test_drop_short_3(self, parser):\n parser.addoption(\"--func-arg\", \"--funcarg\", \"--doit\", action=\"store_true\")\n args = parser.parse([\"abcd\"])\n assert args.func_arg is False\n assert args.file_or_dir == [\"abcd\"]\n\n def test_drop_short_help0(self, parser, capsys):\n parser.addoption(\"--func-args\", \"--doit\", help=\"foo\", action=\"store_true\")\n parser.parse([])\n help = parser.optparser.format_help()\n assert \"--func-args, --doit foo\" in help\n\n # testing would be more helpful with all help generated", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_help1_TestParser.test_drop_short_help1.assert_doit_func_arg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_drop_short_help1_TestParser.test_drop_short_help1.assert_doit_func_arg", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 281, "span_ids": ["TestParser.test_drop_short_help1"], "tokens": 118}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n def test_drop_short_help1(self, parser, capsys):\n group = parser.getgroup(\"general\")\n group.addoption(\"--doit\", \"--func-args\", action=\"store_true\", help=\"foo\")\n group._addoption(\n \"-h\",\n \"--help\",\n action=\"store_true\",\n dest=\"help\",\n help=\"show help message and configuration info\",\n )\n parser.parse([\"-h\"])\n help = parser.optparser.format_help()\n assert \"-doit, --func-args foo\" in help", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_multiple_metavar_help_TestParser.test_multiple_metavar_help.assert_preferences_val": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_TestParser.test_multiple_metavar_help_TestParser.test_multiple_metavar_help.assert_preferences_val", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 283, "end_line": 295, "span_ids": ["TestParser.test_multiple_metavar_help"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestParser(object):\n\n def test_multiple_metavar_help(self, parser):\n \"\"\"\n Help text for options with a metavar tuple should display help\n in the form \"--preferences=value1 value2 value3\" (#2004).\n \"\"\"\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--preferences\", metavar=(\"value1\", \"value2\", \"value3\"), nargs=3\n )\n group._addoption(\"-h\", \"--help\", action=\"store_true\", dest=\"help\")\n parser.parse([\"-h\"])\n help = parser.optparser.format_help()\n assert \"--preferences=value1 value2 value3\" in help", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_test_argcomplete_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_parseopt.py_test_argcomplete_", "embedding": null, "metadata": {"file_path": "testing/test_parseopt.py", "file_name": "test_parseopt.py", "file_type": "text/x-python", "category": "test", "start_line": 298, "end_line": 336, "span_ids": ["test_argcomplete"], "tokens": 451}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_argcomplete(testdir, monkeypatch):\n if not distutils.spawn.find_executable(\"bash\"):\n pytest.skip(\"bash not available\")\n script = str(testdir.tmpdir.join(\"test_argcomplete\"))\n\n with open(str(script), \"w\") as fp:\n # redirect output from argcomplete to stdin and stderr is not trivial\n # http://stackoverflow.com/q/12589419/1307905\n # so we use bash\n 
fp.write('COMP_WORDBREAKS=\"$COMP_WORDBREAKS\" python -m pytest 8>&1 9>&2')\n # alternative would be exteneded Testdir.{run(),_run(),popen()} to be able\n # to handle a keyword argument env that replaces os.environ in popen or\n # extends the copy, advantage: could not forget to restore\n monkeypatch.setenv(\"_ARGCOMPLETE\", \"1\")\n monkeypatch.setenv(\"_ARGCOMPLETE_IFS\", \"\\x0b\")\n monkeypatch.setenv(\"COMP_WORDBREAKS\", \" \\\\t\\\\n\\\"\\\\'><=;|&(:\")\n\n arg = \"--fu\"\n monkeypatch.setenv(\"COMP_LINE\", \"pytest \" + arg)\n monkeypatch.setenv(\"COMP_POINT\", str(len(\"pytest \" + arg)))\n result = testdir.run(\"bash\", str(script), arg)\n if result.ret == 255:\n # argcomplete not found\n pytest.skip(\"argcomplete not available\")\n elif not result.stdout.str():\n pytest.skip(\n \"bash provided no output on stdout, argcomplete not available? (stderr={!r})\".format(\n result.stderr.str()\n )\n )\n else:\n result.stdout.fnmatch_lines([\"--funcargs\", \"--fulltrace\"])\n os.mkdir(\"test_argcomplete.d\")\n arg = \"test_argc\"\n monkeypatch.setenv(\"COMP_LINE\", \"pytest \" + arg)\n monkeypatch.setenv(\"COMP_POINT\", str(len(\"pytest \" + arg)))\n result = testdir.run(\"bash\", str(script), arg)\n result.stdout.fnmatch_lines([\"test_argcomplete\", \"test_argcomplete.d/\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py__encoding_utf_8_TestPasteCapture.test_failed.assert_reprec_countoutcom": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py__encoding_utf_8_TestPasteCapture.test_failed.assert_reprec_countoutcom", "embedding": null, "metadata": {"file_path": "testing/test_pastebin.py", "file_name": "test_pastebin.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 35, "span_ids": ["TestPasteCapture.test_failed", "docstring", "TestPasteCapture", "TestPasteCapture.pastebinlist", "imports"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# encoding: utf-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport pytest\n\n\nclass TestPasteCapture(object):\n @pytest.fixture\n def pastebinlist(self, monkeypatch, request):\n pastebinlist = []\n plugin = request.config.pluginmanager.getplugin(\"pastebin\")\n monkeypatch.setattr(plugin, \"create_new_paste\", pastebinlist.append)\n return pastebinlist\n\n def test_failed(self, testdir, pastebinlist):\n testpath = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_pass():\n pass\n def test_fail():\n assert 0\n def test_skip():\n pytest.skip(\"\")\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"--paste=failed\")\n assert len(pastebinlist) == 1\n s = pastebinlist[0]\n assert s.find(\"def test_fail\") != -1\n assert reprec.countoutcomes() == [1, 1, 1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_all_TestPasteCapture.test_all.matcher_fnmatch_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_all_TestPasteCapture.test_all.matcher_fnmatch_lines_", "embedding": null, "metadata": {"file_path": "testing/test_pastebin.py", "file_name": "test_pastebin.py", "file_type": "text/x-python", "category": "test", "start_line": 37, "end_line": 63, "span_ids": ["TestPasteCapture.test_all"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPasteCapture(object):\n\n def test_all(self, testdir, pastebinlist):\n from _pytest.pytester import LineMatcher\n\n testpath = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_pass():\n pass\n def test_fail():\n assert 0\n def test_skip():\n pytest.skip(\"\")\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"--pastebin=all\", \"-v\")\n assert reprec.countoutcomes() == [1, 1, 1]\n assert len(pastebinlist) == 1\n contents = pastebinlist[0].decode(\"utf-8\")\n matcher = LineMatcher(contents.splitlines())\n matcher.fnmatch_lines(\n [\n \"*test_pass PASSED*\",\n \"*test_fail FAILED*\",\n \"*test_skip SKIPPED*\",\n \"*== 1 failed, 1 passed, 1 skipped in *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_non_ascii_paste_text_TestPasteCapture.test_non_ascii_paste_text.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPasteCapture.test_non_ascii_paste_text_TestPasteCapture.test_non_ascii_paste_text.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pastebin.py", "file_name": "test_pastebin.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 87, "span_ids": ["TestPasteCapture.test_non_ascii_paste_text"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPasteCapture(object):\n\n def test_non_ascii_paste_text(self, testdir):\n \"\"\"Make sure that text which contains non-ascii characters is pasted\n correctly. 
See #1219.\n \"\"\"\n testdir.makepyfile(\n test_unicode=\"\"\"\n # encoding: utf-8\n def test():\n assert '\u263a' == 1\n \"\"\"\n )\n result = testdir.runpytest(\"--pastebin=all\")\n if sys.version_info[0] == 3:\n expected_msg = \"*assert '\u263a' == 1*\"\n else:\n expected_msg = \"*assert '\\\\xe2\\\\x98\\\\xba' == 1*\"\n result.stdout.fnmatch_lines(\n [\n expected_msg,\n \"*== 1 failed in *\",\n \"*Sending information to Paste Service*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste_TestPaste.mocked_urlopen.return.calls": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste_TestPaste.mocked_urlopen.return.calls", "embedding": null, "metadata": {"file_path": "testing/test_pastebin.py", "file_name": "test_pastebin.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 121, "span_ids": ["TestPaste.mocked_urlopen", "TestPaste.mocked_urlopen.mocked.DummyFile.read", "TestPaste", "TestPaste.pastebin", "TestPaste.mocked_urlopen.mocked.DummyFile"], "tokens": 197}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPaste(object):\n @pytest.fixture\n def pastebin(self, request):\n return request.config.pluginmanager.getplugin(\"pastebin\")\n\n @pytest.fixture\n def mocked_urlopen(self, monkeypatch):\n \"\"\"\n monkeypatch the actual urlopen calls done by the internal plugin\n function that connects to bpaste service.\n \"\"\"\n calls = []\n\n def mocked(url, data):\n calls.append((url, data))\n\n class DummyFile(object):\n def read(self):\n # part of html of a normal response\n return b'View raw.'\n\n return DummyFile()\n\n if sys.version_info < (3, 0):\n import urllib\n\n monkeypatch.setattr(urllib, \"urlopen\", mocked)\n else:\n import urllib.request\n\n monkeypatch.setattr(urllib.request, \"urlopen\", mocked)\n return calls", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste.test_create_new_paste_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pastebin.py_TestPaste.test_create_new_paste_", "embedding": null, "metadata": {"file_path": "testing/test_pastebin.py", "file_name": "test_pastebin.py", "file_type": "text/x-python", "category": "test", "start_line": 123, "end_line": 134, "span_ids": ["TestPaste.test_create_new_paste"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPaste(object):\n\n def test_create_new_paste(self, pastebin, mocked_urlopen):\n result = pastebin.create_new_paste(b\"full-paste-contents\")\n assert result == \"https://bpaste.net/show/3c0c6750bd\"\n assert 
len(mocked_urlopen) == 1\n url, data = mocked_urlopen[0]\n assert type(data) is bytes\n lexer = \"python3\" if sys.version_info[0] == 3 else \"python\"\n assert url == \"https://bpaste.net\"\n assert \"lexer=%s\" % lexer in data.decode()\n assert \"code=full-paste-contents\" in data.decode()\n assert \"expiry=1week\" in data.decode()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_sys_TestPort.if_sys_platform_win32.else_.drv2._d_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_sys_TestPort.if_sys_platform_win32.else_.drv2._d_", "embedding": null, "metadata": {"file_path": "testing/test_pathlib.py", "file_name": "test_pathlib.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 37, "span_ids": ["TestPort:3", "imports", "TestPort.match", "TestPort"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import sys\n\nimport py\n\nimport pytest\nfrom _pytest.pathlib import fnmatch_ex\nfrom _pytest.pathlib import get_lock_path\nfrom _pytest.pathlib import maybe_delete_a_numbered_dir\nfrom _pytest.pathlib import Path\n\n\nclass TestPort:\n \"\"\"Test that our port of py.common.FNMatcher (fnmatch_ex) produces the same results as the\n original py.path.local.fnmatch method.\n \"\"\"\n\n @pytest.fixture(params=[\"pathlib\", \"py.path\"])\n def match(self, request):\n if request.param == \"py.path\":\n\n def match_(pattern, path):\n return py.path.local(path).fnmatch(pattern)\n\n else:\n assert request.param == \"pathlib\"\n\n def match_(pattern, path):\n return fnmatch_ex(pattern, path)\n\n return match_\n\n if sys.platform == \"win32\":\n drv1 = \"c:\"\n drv2 = \"d:\"\n else:\n drv1 = \"/c\"\n drv2 = \"/d\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_matching_TestPort.test_matching.assert_match_pattern_pat": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_matching_TestPort.test_matching.assert_match_pattern_pat", "embedding": null, "metadata": {"file_path": "testing/test_pathlib.py", "file_name": "test_pathlib.py", "file_type": "text/x-python", "category": "test", "start_line": 39, "end_line": 54, "span_ids": ["TestPort.test_matching"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPort:\n\n @pytest.mark.parametrize(\n \"pattern, path\",\n [\n (\"*.py\", \"foo.py\"),\n (\"*.py\", \"bar/foo.py\"),\n (\"test_*.py\", \"foo/test_foo.py\"),\n (\"tests/*.py\", \"tests/foo.py\"),\n (drv1 + \"/*.py\", drv1 + \"/foo.py\"),\n (drv1 + \"/foo/*.py\", drv1 + \"/foo/foo.py\"),\n 
(\"tests/**/test*.py\", \"tests/foo/test_foo.py\"),\n (\"tests/**/doc/test*.py\", \"tests/foo/bar/doc/test_foo.py\"),\n (\"tests/**/doc/**/test*.py\", \"tests/foo/doc/bar/test_foo.py\"),\n ],\n )\n def test_matching(self, match, pattern, path):\n assert match(pattern, path)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_not_matching_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pathlib.py_TestPort.test_not_matching_", "embedding": null, "metadata": {"file_path": "testing/test_pathlib.py", "file_name": "test_pathlib.py", "file_type": "text/x-python", "category": "test", "start_line": 56, "end_line": 87, "span_ids": ["TestPort.test_not_matching", "test_access_denied_during_cleanup"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPort:\n\n @pytest.mark.parametrize(\n \"pattern, path\",\n [\n (\"*.py\", \"foo.pyc\"),\n (\"*.py\", \"foo/foo.pyc\"),\n (\"tests/*.py\", \"foo/foo.py\"),\n (drv1 + \"/*.py\", drv2 + \"/foo.py\"),\n (drv1 + \"/foo/*.py\", drv2 + \"/foo/foo.py\"),\n (\"tests/**/test*.py\", \"tests/foo.py\"),\n (\"tests/**/test*.py\", \"foo/test_foo.py\"),\n (\"tests/**/doc/test*.py\", \"tests/foo/bar/doc/foo.py\"),\n (\"tests/**/doc/test*.py\", \"tests/foo/bar/test_foo.py\"),\n ],\n )\n def test_not_matching(self, match, pattern, path):\n assert not match(pattern, path)\n\n\ndef test_access_denied_during_cleanup(tmp_path, monkeypatch):\n \"\"\"Ensure that deleting a numbered dir does not fail because of OSErrors (#4262).\"\"\"\n path = tmp_path / \"temp-1\"\n path.mkdir()\n\n def renamed_failed(*args):\n raise OSError(\"access denied\")\n\n monkeypatch.setattr(Path, \"rename\", renamed_failed)\n\n lock_path = get_lock_path(path)\n maybe_delete_a_numbered_dir(path)\n assert not lock_path.is_file()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_from___future___import_ab_custom_pdb_calls.return.called": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_from___future___import_ab_custom_pdb_calls.return.called", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 50, "span_ids": ["impl", "custom_pdb_calls._CustomPdb:2", "custom_pdb_calls", "runpdb_and_get_report", "custom_pdb_calls._CustomPdb", "imports"], "tokens": 278}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport platform\nimport 
sys\n\nimport _pytest._code\nimport pytest\nfrom _pytest.debugging import _validate_usepdb_cls\n\ntry:\n breakpoint\nexcept NameError:\n SUPPORTS_BREAKPOINT_BUILTIN = False\nelse:\n SUPPORTS_BREAKPOINT_BUILTIN = True\n\n\n_ENVIRON_PYTHONBREAKPOINT = os.environ.get(\"PYTHONBREAKPOINT\", \"\")\n\n\ndef runpdb_and_get_report(testdir, source):\n p = testdir.makepyfile(source)\n result = testdir.runpytest_inprocess(\"--pdb\", p)\n reports = result.reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3, reports # setup/call/teardown\n return reports[1]\n\n\n@pytest.fixture\ndef custom_pdb_calls():\n called = []\n\n # install dummy debugger class and track which methods were called on it\n class _CustomPdb(object):\n quitting = False\n\n def __init__(self, *args, **kwargs):\n called.append(\"init\")\n\n def reset(self):\n called.append(\"reset\")\n\n def interaction(self, *args):\n called.append(\"interaction\")\n\n _pytest._CustomPdb = _CustomPdb\n return called", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_custom_debugger_hook_custom_debugger_hook.del__pytest__CustomDebugg": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_custom_debugger_hook_custom_debugger_hook.del__pytest__CustomDebugg", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 76, "span_ids": ["custom_debugger_hook", "custom_debugger_hook._CustomDebugger.__init__", "custom_debugger_hook._CustomDebugger"], "tokens": 123}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef custom_debugger_hook():\n called = []\n\n # install dummy debugger class and track which methods were called on it\n class _CustomDebugger(object):\n def __init__(self, *args, **kwargs):\n called.append(\"init\")\n\n def reset(self):\n called.append(\"reset\")\n\n def interaction(self, *args):\n called.append(\"interaction\")\n\n def set_trace(self, frame):\n print(\"**CustomDebugger**\")\n called.append(\"set_trace\")\n\n _pytest._CustomDebugger = _CustomDebugger\n yield called\n del _pytest._CustomDebugger", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB_TestPDB.flush.if_child_isalive_.child_wait_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB_TestPDB.flush.if_child_isalive_.child_wait_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 158, "span_ids": ["TestPDB.test_pdb_on_xfail", "TestPDB.test_pdb_on_fail", "TestPDB.test_pdb_on_BdbQuit", "TestPDB.test_pdb_on_KeyboardInterrupt", "TestPDB.test_pdb_on_skip", "TestPDB", "TestPDB.flush", "TestPDB.pdblist"], "tokens": 478}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n @pytest.fixture\n def pdblist(self, request):\n monkeypatch = request.getfixturevalue(\"monkeypatch\")\n pdblist = []\n\n def mypdb(*args):\n pdblist.append(args)\n\n plugin = request.config.pluginmanager.getplugin(\"debugging\")\n monkeypatch.setattr(plugin, \"post_mortem\", mypdb)\n return pdblist\n\n def test_pdb_on_fail(self, testdir, pdblist):\n rep = runpdb_and_get_report(\n testdir,\n \"\"\"\n def test_func():\n assert 0\n \"\"\",\n )\n assert rep.failed\n assert len(pdblist) == 1\n tb = _pytest._code.Traceback(pdblist[0][0])\n assert tb[-1].name == \"test_func\"\n\n def test_pdb_on_xfail(self, testdir, pdblist):\n rep = runpdb_and_get_report(\n testdir,\n \"\"\"\n import pytest\n @pytest.mark.xfail\n def test_func():\n assert 0\n \"\"\",\n )\n assert \"xfail\" in rep.keywords\n assert not pdblist\n\n def test_pdb_on_skip(self, testdir, pdblist):\n rep = runpdb_and_get_report(\n testdir,\n \"\"\"\n import pytest\n def test_func():\n pytest.skip(\"hello\")\n \"\"\",\n )\n assert rep.skipped\n assert len(pdblist) == 0\n\n def test_pdb_on_BdbQuit(self, testdir, pdblist):\n rep = runpdb_and_get_report(\n testdir,\n \"\"\"\n import bdb\n def test_func():\n raise bdb.BdbQuit\n \"\"\",\n )\n assert rep.failed\n assert len(pdblist) == 0\n\n def test_pdb_on_KeyboardInterrupt(self, testdir, pdblist):\n rep = runpdb_and_get_report(\n testdir,\n \"\"\"\n def test_func():\n raise KeyboardInterrupt\n \"\"\",\n )\n assert rep.failed\n assert len(pdblist) == 1\n\n @staticmethod\n def flush(child):\n if platform.system() == \"Darwin\":\n return\n if child.isalive():\n child.wait()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_postmortem_TestPDB.test_pdb_unittest_postmortem.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_postmortem_TestPDB.test_pdb_unittest_postmortem.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 160, "end_line": 178, "span_ids": ["TestPDB.test_pdb_unittest_postmortem"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_unittest_postmortem(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import unittest\n class Blub(unittest.TestCase):\n def tearDown(self):\n self.filename = None\n def test_false(self):\n self.filename = 'debug' + '.me'\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n child.expect(\"Pdb\")\n child.sendline(\"p self.filename\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"debug.me\" in rest\n self.flush(child)", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_skip_TestPDB.test_pdb_unittest_skip.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_unittest_skip_TestPDB.test_pdb_unittest_skip.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 180, "end_line": 195, "span_ids": ["TestPDB.test_pdb_unittest_skip"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_unittest_skip(self, testdir):\n \"\"\"Test for issue #2137\"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n import unittest\n @unittest.skipIf(True, 'Skipping also with pdb active')\n class MyTestCase(unittest.TestCase):\n def test_one(self):\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(\"-rs --pdb %s\" % p1)\n child.expect(\"Skipping also with pdb active\")\n child.expect(\"1 skipped in\")\n child.sendeof()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_stdout_and_stderr_TestPDB.test_pdb_print_captured_stdout_and_stderr.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_stdout_and_stderr_TestPDB.test_pdb_print_captured_stdout_and_stderr.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 197, "end_line": 224, "span_ids": ["TestPDB.test_pdb_print_captured_stdout_and_stderr"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_print_captured_stdout_and_stderr(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n import sys\n sys.stderr.write(\"get\\\\x20rekt\")\n print(\"get\\\\x20rekt\")\n assert False\n\n def test_not_called_due_to_quit():\n pass\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n child.expect(\"captured stdout\")\n child.expect(\"get rekt\")\n child.expect(\"captured stderr\")\n child.expect(\"get rekt\")\n child.expect(\"traceback\")\n child.expect(\"def test_1\")\n child.expect(\"Pdb\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"Exit: Quitting debugger\" in rest\n assert \"= 1 failed in\" in rest\n assert \"def test_1\" not in rest\n assert \"get rekt\" not in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr_TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 226, "end_line": 239, "span_ids": ["TestPDB.test_pdb_dont_print_empty_captured_stdout_and_stderr"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_dont_print_empty_captured_stdout_and_stderr(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n assert False\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n child.expect(\"Pdb\")\n output = child.before.decode(\"utf8\")\n child.sendeof()\n assert \"captured stdout\" not in output\n assert \"captured stderr\" not in output\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_TestPDB.test_pdb_print_captured_logs.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_TestPDB.test_pdb_print_captured_logs.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 261, "span_ids": ["TestPDB.test_pdb_print_captured_logs"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n @pytest.mark.parametrize(\"showcapture\", [\"all\", \"no\", \"log\"])\n def test_pdb_print_captured_logs(self, testdir, showcapture):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n import logging\n logging.warn(\"get \" + \"rekt\")\n assert False\n \"\"\"\n )\n child = testdir.spawn_pytest(\n \"--show-capture={} --pdb {}\".format(showcapture, p1)\n )\n if showcapture in (\"all\", \"log\"):\n child.expect(\"captured log\")\n child.expect(\"get rekt\")\n child.expect(\"Pdb\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"1 failed\" in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_nologging_TestPDB.test_pdb_print_captured_logs_nologging.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_print_captured_logs_nologging_TestPDB.test_pdb_print_captured_logs_nologging.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 263, "end_line": 280, "span_ids": ["TestPDB.test_pdb_print_captured_logs_nologging"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_print_captured_logs_nologging(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n import logging\n logging.warn(\"get \" + \"rekt\")\n assert False\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--show-capture=all --pdb -p no:logging %s\" % p1)\n child.expect(\"get rekt\")\n output = child.before.decode(\"utf8\")\n assert \"captured log\" not in output\n child.expect(\"Pdb\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"1 failed\" in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_exception_TestPDB.test_pdb_interaction_on_collection_issue181.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_exception_TestPDB.test_pdb_interaction_on_collection_issue181.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 282, "end_line": 314, "span_ids": ["TestPDB.test_pdb_interaction_exception", "TestPDB.test_pdb_interaction_on_collection_issue181"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_interaction_exception(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def globalfunc():\n pass\n def test_1():\n pytest.raises(ValueError, globalfunc)\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n child.expect(\".*def test_1\")\n child.expect(\".*pytest.raises.*globalfunc\")\n child.expect(\"Pdb\")\n child.sendline(\"globalfunc\")\n child.expect(\".*function\")\n child.sendeof()\n child.expect(\"1 failed\")\n self.flush(child)\n\n def test_pdb_interaction_on_collection_issue181(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n xxx\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n # child.expect(\".*import pytest.*\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(\"1 error\")\n self.flush(child)", "start_char_idx": 
null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_on_internal_error_TestPDB.test_pdb_interaction_on_internal_error.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_on_internal_error_TestPDB.test_pdb_interaction_on_internal_error.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 316, "end_line": 340, "span_ids": ["TestPDB.test_pdb_interaction_on_internal_error"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_interaction_on_internal_error(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_runtest_protocol():\n 0/0\n \"\"\"\n )\n p1 = testdir.makepyfile(\"def test_func(): pass\")\n child = testdir.spawn_pytest(\"--pdb %s\" % p1)\n child.expect(\"Pdb\")\n\n # INTERNALERROR is only displayed once via terminal reporter.\n assert (\n len(\n [\n x\n for x in child.before.decode().splitlines()\n if x.startswith(\"INTERNALERROR> Traceback\")\n ]\n )\n == 1\n )\n\n child.sendeof()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_simple_TestPDB.test_pdb_interaction_capturing_simple.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_simple_TestPDB.test_pdb_interaction_capturing_simple.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 342, "end_line": 364, "span_ids": ["TestPDB.test_pdb_interaction_capturing_simple"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_interaction_capturing_simple(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n i = 0\n print(\"hello17\")\n pytest.set_trace()\n i == 1\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(r\"test_1\\(\\)\")\n child.expect(\"i == 1\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n rest = child.read().decode(\"utf-8\")\n assert \"AssertionError\" in rest\n assert \"1 failed\" in rest\n assert \"def test_1\" in rest\n assert \"hello17\" in rest # out is captured\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_kwargs_TestPDB.test_pdb_set_trace_kwargs.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_kwargs_TestPDB.test_pdb_set_trace_kwargs.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 387, "span_ids": ["TestPDB.test_pdb_set_trace_kwargs"], "tokens": 179}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_set_trace_kwargs(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n i = 0\n print(\"hello17\")\n pytest.set_trace(header=\"== my_header ==\")\n x = 3\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"== my_header ==\")\n assert \"PDB set_trace\" not in child.before.decode()\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n rest = child.read().decode(\"utf-8\")\n assert \"1 failed\" in rest\n assert \"def test_1\" in rest\n assert \"hello17\" in rest # out is captured\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_interception_TestPDB.test_pdb_and_capsys.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_set_trace_interception_TestPDB.test_pdb_and_capsys.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 387, "end_line": 420, "span_ids": ["TestPDB.test_pdb_and_capsys", "TestPDB.test_pdb_set_trace_interception"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_set_trace_interception(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pdb\n def test_1():\n pdb.set_trace()\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"test_1\")\n child.expect(\"Pdb\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"no tests ran\" in rest\n assert \"reading from stdin while output\" not in rest\n assert \"BdbQuit\" not in rest\n self.flush(child)\n\n def test_pdb_and_capsys(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1(capsys):\n print(\"hello1\")\n pytest.set_trace()\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"test_1\")\n child.send(\"capsys.readouterr()\\n\")\n child.expect(\"hello1\")\n child.sendeof()\n child.read()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_caplog_on_pdb_invocation_TestPDB.test_pdb_with_caplog_on_pdb_invocation.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_caplog_on_pdb_invocation_TestPDB.test_pdb_with_caplog_on_pdb_invocation.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 440, "span_ids": ["TestPDB.test_pdb_with_caplog_on_pdb_invocation"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_with_caplog_on_pdb_invocation(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1(capsys, caplog):\n import logging\n logging.getLogger(__name__).warning(\"some_warning\")\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdb %s\" % str(p1))\n child.send(\"caplog.record_tuples\\n\")\n child.expect_exact(\n \"[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]\"\n )\n child.sendeof()\n child.read()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_set_trace_capturing_afterwards_TestPDB.test_set_trace_capturing_afterwards.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_set_trace_capturing_afterwards_TestPDB.test_set_trace_capturing_afterwards.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 442, "end_line": 461, "span_ids": ["TestPDB.test_set_trace_capturing_afterwards"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_set_trace_capturing_afterwards(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pdb\n def test_1():\n pdb.set_trace()\n def test_2():\n print(\"hello\")\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"test_1\")\n child.send(\"c\\n\")\n child.expect(\"test_2\")\n child.expect(\"Captured\")\n child.expect(\"hello\")\n child.sendeof()\n child.read()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_doctest_TestPDB.test_pdb_interaction_doctest.self_flush_child_": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_doctest_TestPDB.test_pdb_interaction_doctest.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 463, "end_line": 486, "span_ids": ["TestPDB.test_pdb_interaction_doctest"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_interaction_doctest(self, testdir, monkeypatch):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def function_1():\n '''\n >>> i = 0\n >>> assert i == 1\n '''\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--doctest-modules --pdb %s\" % p1)\n child.expect(\"Pdb\")\n\n assert \"UNEXPECTED EXCEPTION: AssertionError()\" in child.before.decode(\"utf8\")\n\n child.sendline(\"'i=%i.' % i\")\n child.expect(\"Pdb\")\n assert \"\\r\\n'i=0.'\\r\\n\" in child.before.decode(\"utf8\")\n\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"1 failed\" in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_twice_TestPDB.test_pdb_interaction_capturing_twice.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_interaction_capturing_twice_TestPDB.test_pdb_interaction_capturing_twice.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 488, "end_line": 521, "span_ids": ["TestPDB.test_pdb_interaction_capturing_twice"], "tokens": 290}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_interaction_capturing_twice(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n i = 0\n print(\"hello17\")\n pytest.set_trace()\n x = 3\n print(\"hello18\")\n pytest.set_trace()\n x = 4\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(r\"PDB set_trace \\(IO-capturing turned off\\)\")\n child.expect(\"test_1\")\n child.expect(\"x = 3\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(r\"PDB continue \\(IO-capturing resumed\\)\")\n child.expect(r\"PDB set_trace \\(IO-capturing turned off\\)\")\n child.expect(\"x = 4\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(\"_ test_1 _\")\n child.expect(\"def test_1\")\n rest = child.read().decode(\"utf8\")\n assert \"Captured stdout call\" in rest\n assert \"hello17\" in rest # out is captured\n assert \"hello18\" in rest # out is captured\n assert \"1 failed\" in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_injected_do_debug_TestPDB.test_pdb_with_injected_do_debug.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_with_injected_do_debug_TestPDB.test_pdb_with_injected_do_debug.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 523, "end_line": 599, "span_ids": ["TestPDB.test_pdb_with_injected_do_debug"], "tokens": 663}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_with_injected_do_debug(self, testdir):\n \"\"\"Simulates pdbpp, which injects Pdb into do_debug, and uses\n self.__class__ in do_continue.\n \"\"\"\n p1 = testdir.makepyfile(\n mytest=\"\"\"\n import pdb\n import pytest\n\n count_continue = 0\n\n class CustomPdb(pdb.Pdb, object):\n def do_debug(self, arg):\n import sys\n import types\n\n if sys.version_info < (3, ):\n do_debug_func = pdb.Pdb.do_debug.im_func\n else:\n do_debug_func = pdb.Pdb.do_debug\n\n newglobals = do_debug_func.__globals__.copy()\n newglobals['Pdb'] = self.__class__\n orig_do_debug = types.FunctionType(\n do_debug_func.__code__, newglobals,\n do_debug_func.__name__, do_debug_func.__defaults__,\n )\n return orig_do_debug(self, arg)\n do_debug.__doc__ = pdb.Pdb.do_debug.__doc__\n\n def do_continue(self, *args, **kwargs):\n global count_continue\n count_continue += 1\n return super(CustomPdb, self).do_continue(*args, **kwargs)\n\n def foo():\n print(\"print_from_foo\")\n\n def test_1():\n i = 0\n print(\"hello17\")\n pytest.set_trace()\n x = 3\n print(\"hello18\")\n\n assert count_continue == 2, \"unexpected_failure: %d != 2\" % count_continue\n pytest.fail(\"expected_failure\")\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--pdbcls=mytest:CustomPdb %s\" % str(p1))\n child.expect(r\"PDB set_trace \\(IO-capturing turned off\\)\")\n child.expect(r\"\\n\\(Pdb\")\n child.sendline(\"debug foo()\")\n child.expect(\"ENTERING RECURSIVE DEBUGGER\")\n child.expect(r\"\\n\\(\\(Pdb\")\n child.sendline(\"c\")\n child.expect(\"LEAVING RECURSIVE DEBUGGER\")\n assert b\"PDB continue\" not in child.before\n # No extra newline.\n assert child.before.endswith(b\"c\\r\\nprint_from_foo\\r\\n\")\n\n # set_debug should not raise outcomes.Exit, if used recrursively.\n child.sendline(\"debug 42\")\n child.sendline(\"q\")\n child.expect(\"LEAVING RECURSIVE DEBUGGER\")\n assert b\"ENTERING RECURSIVE DEBUGGER\" in child.before\n assert b\"Quitting debugger\" not in child.before\n\n child.sendline(\"c\")\n child.expect(r\"PDB continue \\(IO-capturing resumed\\)\")\n rest = child.read().decode(\"utf8\")\n assert \"hello17\" in rest # out is captured\n assert \"hello18\" in rest # out is captured\n assert \"1 failed\" in rest\n assert \"Failed: expected_failure\" in rest\n assert \"AssertionError: unexpected_failure\" not in rest\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": 
"{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_without_capture_TestPDB.test_pdb_without_capture.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_without_capture_TestPDB.test_pdb_without_capture.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 601, "end_line": 615, "span_ids": ["TestPDB.test_pdb_without_capture"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_without_capture(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n pytest.set_trace()\n \"\"\"\n )\n child = testdir.spawn_pytest(\"-s %s\" % p1)\n child.expect(r\">>> PDB set_trace >>>\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(r\">>> PDB continue >>>\")\n child.expect(\"1 passed\")\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_continue_with_recursive_debug_TestPDB.test_pdb_continue_with_recursive_debug.assert_1_passed_in_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_continue_with_recursive_debug_TestPDB.test_pdb_continue_with_recursive_debug.assert_1_passed_in_in_r", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 617, "end_line": 707, "span_ids": ["TestPDB.test_pdb_continue_with_recursive_debug"], "tokens": 739}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n @pytest.mark.parametrize(\"capture_arg\", (\"\", \"-s\", \"-p no:capture\"))\n def test_pdb_continue_with_recursive_debug(self, capture_arg, testdir):\n \"\"\"Full coverage for do_debug without capturing.\n\n This is very similar to test_pdb_interaction_continue_recursive in general,\n but mocks out ``pdb.set_trace`` for providing more coverage.\n \"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n try:\n input = raw_input\n except NameError:\n pass\n\n def set_trace():\n __import__('pdb').set_trace()\n\n def test_1(monkeypatch):\n import _pytest.debugging\n\n class pytestPDBTest(_pytest.debugging.pytestPDB):\n @classmethod\n def set_trace(cls, *args, **kwargs):\n # Init _PdbWrapper to handle capturing.\n _pdb = cls._init_pdb(*args, **kwargs)\n\n # Mock out pdb.Pdb.do_continue.\n import pdb\n pdb.Pdb.do_continue = lambda self, arg: None\n\n print(\"=== SET_TRACE ===\")\n assert input() == \"debug set_trace()\"\n\n # Simulate _PdbWrapper.do_debug\n cls._recursive_debug += 1\n print(\"ENTERING 
RECURSIVE DEBUGGER\")\n print(\"=== SET_TRACE_2 ===\")\n\n assert input() == \"c\"\n _pdb.do_continue(\"\")\n print(\"=== SET_TRACE_3 ===\")\n\n # Simulate _PdbWrapper.do_debug\n print(\"LEAVING RECURSIVE DEBUGGER\")\n cls._recursive_debug -= 1\n\n print(\"=== SET_TRACE_4 ===\")\n assert input() == \"c\"\n _pdb.do_continue(\"\")\n\n def do_continue(self, arg):\n print(\"=== do_continue\")\n # _PdbWrapper.do_continue(\"\")\n\n monkeypatch.setattr(_pytest.debugging, \"pytestPDB\", pytestPDBTest)\n\n import pdb\n monkeypatch.setattr(pdb, \"set_trace\", pytestPDBTest.set_trace)\n\n set_trace()\n \"\"\"\n )\n child = testdir.spawn_pytest(\"%s %s\" % (p1, capture_arg))\n child.expect(\"=== SET_TRACE ===\")\n before = child.before.decode(\"utf8\")\n if not capture_arg:\n assert \">>> PDB set_trace (IO-capturing turned off) >>>\" in before\n else:\n assert \">>> PDB set_trace >>>\" in before\n child.sendline(\"debug set_trace()\")\n child.expect(\"=== SET_TRACE_2 ===\")\n before = child.before.decode(\"utf8\")\n assert \"\\r\\nENTERING RECURSIVE DEBUGGER\\r\\n\" in before\n child.sendline(\"c\")\n child.expect(\"=== SET_TRACE_3 ===\")\n\n # No continue message with recursive debugging.\n before = child.before.decode(\"utf8\")\n assert \">>> PDB continue \" not in before\n\n child.sendline(\"c\")\n child.expect(\"=== SET_TRACE_4 ===\")\n before = child.before.decode(\"utf8\")\n assert \"\\r\\nLEAVING RECURSIVE DEBUGGER\\r\\n\" in before\n child.sendline(\"c\")\n rest = child.read().decode(\"utf8\")\n if not capture_arg:\n assert \"> PDB continue (IO-capturing resumed) >\" in rest\n else:\n assert \"> PDB continue >\" in rest\n assert \"1 passed in\" in rest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_used_outside_test_TestPDB.test_pdb_collection_failure_is_shown.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_used_outside_test_TestPDB.test_pdb_collection_failure_is_shown.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 709, "end_line": 745, "span_ids": ["TestPDB.test_pdb_used_in_generate_tests", "TestPDB.test_pdb_collection_failure_is_shown", "TestPDB.test_pdb_used_outside_test"], "tokens": 272}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_used_outside_test(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n pytest.set_trace()\n x = 5\n \"\"\"\n )\n child = testdir.spawn(\"{} {}\".format(sys.executable, p1))\n child.expect(\"x = 5\")\n child.expect(\"Pdb\")\n child.sendeof()\n self.flush(child)\n\n def test_pdb_used_in_generate_tests(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_generate_tests(metafunc):\n pytest.set_trace()\n x = 5\n def test_foo(a):\n pass\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"x = 5\")\n child.expect(\"Pdb\")\n child.sendeof()\n 
self.flush(child)\n\n def test_pdb_collection_failure_is_shown(self, testdir):\n p1 = testdir.makepyfile(\"xxx\")\n result = testdir.runpytest_subprocess(\"--pdb\", p1)\n result.stdout.fnmatch_lines(\n [\"E NameError: *xxx*\", \"*! *Exit: Quitting debugger !*\"] # due to EOF\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_enter_leave_pdb_hooks_are_called_TestPDB.test_enter_leave_pdb_hooks_are_called.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_enter_leave_pdb_hooks_are_called_TestPDB.test_enter_leave_pdb_hooks_are_called.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 747, "end_line": 790, "span_ids": ["TestPDB.test_enter_leave_pdb_hooks_are_called"], "tokens": 280}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_enter_leave_pdb_hooks_are_called(self, testdir):\n testdir.makeconftest(\n \"\"\"\n mypdb = None\n\n def pytest_configure(config):\n config.testing_verification = 'configured'\n\n def pytest_enter_pdb(config, pdb):\n assert config.testing_verification == 'configured'\n print('enter_pdb_hook')\n\n global mypdb\n mypdb = pdb\n mypdb.set_attribute = \"bar\"\n\n def pytest_leave_pdb(config, pdb):\n assert config.testing_verification == 'configured'\n print('leave_pdb_hook')\n\n global mypdb\n assert mypdb is pdb\n assert mypdb.set_attribute == \"bar\"\n \"\"\"\n )\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n\n def test_foo():\n pytest.set_trace()\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"enter_pdb_hook\")\n child.sendline(\"c\")\n child.expect(r\"PDB continue \\(IO-capturing resumed\\)\")\n child.expect(\"Captured stdout call\")\n rest = child.read().decode(\"utf8\")\n assert \"leave_pdb_hook\" in rest\n assert \"1 failed\" in rest\n child.sendeof()\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_TestPDB.test_pdb_custom_cls_without_pdb.assert_custom_pdb_calls_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_TestPDB.test_pdb_custom_cls_without_pdb.assert_custom_pdb_calls_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 792, "end_line": 818, "span_ids": ["TestPDB.test_pdb_validate_usepdb_cls", "TestPDB.test_pdb_custom_cls_without_pdb", "TestPDB.test_pdb_custom_cls_invalid", "TestPDB.test_pdb_custom_cls"], "tokens": 308}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_custom_cls(self, testdir, custom_pdb_calls):\n p1 = testdir.makepyfile(\"\"\"xxx \"\"\")\n result = testdir.runpytest_inprocess(\"--pdb\", \"--pdbcls=_pytest:_CustomPdb\", p1)\n result.stdout.fnmatch_lines([\"*NameError*xxx*\", \"*1 error*\"])\n assert custom_pdb_calls == [\"init\", \"reset\", \"interaction\"]\n\n def test_pdb_custom_cls_invalid(self, testdir):\n result = testdir.runpytest_inprocess(\"--pdbcls=invalid\")\n result.stderr.fnmatch_lines(\n [\n \"*: error: argument --pdbcls: 'invalid' is not in the format 'modname:classname'\"\n ]\n )\n\n def test_pdb_validate_usepdb_cls(self, testdir):\n assert _validate_usepdb_cls(\"os.path:dirname.__name__\") == (\n \"os.path\",\n \"dirname.__name__\",\n )\n\n assert _validate_usepdb_cls(\"pdb:DoesNotExist\") == (\"pdb\", \"DoesNotExist\")\n\n def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):\n p1 = testdir.makepyfile(\"\"\"xxx \"\"\")\n result = testdir.runpytest_inprocess(\"--pdbcls=_pytest:_CustomPdb\", p1)\n result.stdout.fnmatch_lines([\"*NameError*xxx*\", \"*1 error*\"])\n assert custom_pdb_calls == []", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_with_settrace_TestPDB.test_pdb_custom_cls_with_settrace.self_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestPDB.test_pdb_custom_cls_with_settrace_TestPDB.test_pdb_custom_cls_with_settrace.self_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 820, "end_line": 847, "span_ids": ["TestPDB.test_pdb_custom_cls_with_settrace"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPDB(object):\n\n def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch):\n testdir.makepyfile(\n custom_pdb=\"\"\"\n class CustomPdb(object):\n def __init__(self, *args, **kwargs):\n skip = kwargs.pop(\"skip\")\n assert skip == [\"foo.*\"]\n print(\"__init__\")\n super(CustomPdb, self).__init__(*args, **kwargs)\n\n def set_trace(*args, **kwargs):\n print('custom set_trace>')\n \"\"\"\n )\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n\n def test_foo():\n pytest.set_trace(skip=['foo.*'])\n \"\"\"\n )\n monkeypatch.setenv(\"PYTHONPATH\", str(testdir.tmpdir))\n child = testdir.spawn_pytest(\"--pdbcls=custom_pdb:CustomPdb %s\" % str(p1))\n\n child.expect(\"__init__\")\n child.expect(\"custom set_trace>\")\n self.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints_TestDebuggingBreakpoints.test_supports_breakpoint_module_global.None_2.assert_SUPPORTS_BREAKPOIN": 
{"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints_TestDebuggingBreakpoints.test_supports_breakpoint_module_global.None_2.assert_SUPPORTS_BREAKPOIN", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 850, "end_line": 861, "span_ids": ["TestDebuggingBreakpoints.test_supports_breakpoint_module_global", "TestDebuggingBreakpoints"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n def test_supports_breakpoint_module_global(self):\n \"\"\"\n Test that supports breakpoint global marks on Python 3.7+ and not on\n CPython 3.5, 2.7\n \"\"\"\n if sys.version_info.major == 3 and sys.version_info.minor >= 7:\n assert SUPPORTS_BREAKPOINT_BUILTIN is True\n if sys.version_info.major == 3 and sys.version_info.minor == 5:\n assert SUPPORTS_BREAKPOINT_BUILTIN is False\n if sys.version_info.major == 2 and sys.version_info.minor == 7:\n assert SUPPORTS_BREAKPOINT_BUILTIN is False", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure_TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 863, "end_line": 895, "span_ids": ["TestDebuggingBreakpoints.test_sys_breakpointhook_configure_and_unconfigure"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n\n @pytest.mark.skipif(\n not SUPPORTS_BREAKPOINT_BUILTIN, reason=\"Requires breakpoint() builtin\"\n )\n @pytest.mark.parametrize(\"arg\", [\"--pdb\", \"\"])\n def test_sys_breakpointhook_configure_and_unconfigure(self, testdir, arg):\n \"\"\"\n Test that sys.breakpointhook is set to the custom Pdb class once configured, test that\n hook is reset to system value once pytest has been unconfigured\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import sys\n from pytest import hookimpl\n from _pytest.debugging import pytestPDB\n\n def pytest_configure(config):\n config._cleanup.append(check_restored)\n\n def check_restored():\n assert sys.breakpointhook == sys.__breakpointhook__\n\n def test_check():\n assert sys.breakpointhook == pytestPDB.set_trace\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_nothing(): pass\n 
\"\"\"\n )\n args = (arg,) if arg else ()\n result = testdir.runpytest_subprocess(*args)\n result.stdout.fnmatch_lines([\"*1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_custom_cls_TestDebuggingBreakpoints.test_pdb_custom_cls.assert_custom_debugger_ho": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_custom_cls_TestDebuggingBreakpoints.test_pdb_custom_cls.assert_custom_debugger_ho", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 897, "end_line": 911, "span_ids": ["TestDebuggingBreakpoints.test_pdb_custom_cls"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n\n @pytest.mark.skipif(\n not SUPPORTS_BREAKPOINT_BUILTIN, reason=\"Requires breakpoint() builtin\"\n )\n def test_pdb_custom_cls(self, testdir, custom_debugger_hook):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_nothing():\n breakpoint()\n \"\"\"\n )\n result = testdir.runpytest_inprocess(\n \"--pdb\", \"--pdbcls=_pytest:_CustomDebugger\", p1\n )\n result.stdout.fnmatch_lines([\"*CustomDebugger*\", \"*1 passed*\"])\n assert custom_debugger_hook == [\"init\", \"set_trace\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_environ_custom_class_TestDebuggingBreakpoints.test_environ_custom_class.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_environ_custom_class_TestDebuggingBreakpoints.test_environ_custom_class.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 913, "end_line": 943, "span_ids": ["TestDebuggingBreakpoints.test_environ_custom_class"], "tokens": 226}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n\n @pytest.mark.parametrize(\"arg\", [\"--pdb\", \"\"])\n @pytest.mark.skipif(\n not SUPPORTS_BREAKPOINT_BUILTIN, reason=\"Requires breakpoint() builtin\"\n )\n def test_environ_custom_class(self, testdir, custom_debugger_hook, arg):\n testdir.makeconftest(\n \"\"\"\n import os\n import sys\n\n os.environ['PYTHONBREAKPOINT'] = '_pytest._CustomDebugger.set_trace'\n\n def pytest_configure(config):\n config._cleanup.append(check_restored)\n\n def check_restored():\n assert sys.breakpointhook == 
sys.__breakpointhook__\n\n def test_check():\n import _pytest\n assert sys.breakpointhook is _pytest._CustomDebugger.set_trace\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_nothing(): pass\n \"\"\"\n )\n args = (arg,) if arg else ()\n result = testdir.runpytest_subprocess(*args)\n result.stdout.fnmatch_lines([\"*1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpoint_interception_TestDebuggingBreakpoints.test_sys_breakpoint_interception.TestPDB_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_sys_breakpoint_interception_TestDebuggingBreakpoints.test_sys_breakpoint_interception.TestPDB_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 943, "end_line": 964, "span_ids": ["TestDebuggingBreakpoints.test_sys_breakpoint_interception"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n\n @pytest.mark.skipif(\n not SUPPORTS_BREAKPOINT_BUILTIN, reason=\"Requires breakpoint() builtin\"\n )\n @pytest.mark.skipif(\n not _ENVIRON_PYTHONBREAKPOINT == \"\",\n reason=\"Requires breakpoint() default value\",\n )\n def test_sys_breakpoint_interception(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n breakpoint()\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"test_1\")\n child.expect(\"Pdb\")\n child.sendeof()\n rest = child.read().decode(\"utf8\")\n assert \"Quitting debugger\" in rest\n assert \"reading from stdin while output\" not in rest\n TestPDB.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_not_altered_TestDebuggingBreakpoints.test_pdb_not_altered.TestPDB_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestDebuggingBreakpoints.test_pdb_not_altered_TestDebuggingBreakpoints.test_pdb_not_altered.TestPDB_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 968, "end_line": 987, "span_ids": ["TestDebuggingBreakpoints.test_pdb_not_altered"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDebuggingBreakpoints(object):\n\n @pytest.mark.skipif(\n not SUPPORTS_BREAKPOINT_BUILTIN, reason=\"Requires breakpoint() builtin\"\n )\n def 
test_pdb_not_altered(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pdb\n def test_1():\n pdb.set_trace()\n assert 0\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"test_1\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n rest = child.read().decode(\"utf8\")\n assert \"1 failed\" in rest\n assert \"reading from stdin while output\" not in rest\n TestPDB.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestTraceOption_TestTraceOption.test_trace_sets_breakpoint.TestPDB_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_TestTraceOption_TestTraceOption.test_trace_sets_breakpoint.TestPDB_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 990, "end_line": 1020, "span_ids": ["TestTraceOption", "TestTraceOption.test_trace_sets_breakpoint"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTraceOption:\n def test_trace_sets_breakpoint(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_1():\n assert True\n\n def test_2():\n pass\n\n def test_3():\n pass\n \"\"\"\n )\n child = testdir.spawn_pytest(\"--trace \" + str(p1))\n child.expect(\"test_1\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(\"test_2\")\n child.expect(\"Pdb\")\n child.sendline(\"c\")\n child.expect(\"test_3\")\n child.expect(\"Pdb\")\n child.sendline(\"q\")\n child.expect_exact(\"Exit: Quitting debugger\")\n rest = child.read().decode(\"utf8\")\n assert \"2 passed in\" in rest\n assert \"reading from stdin while output\" not in rest\n # Only printed once - not on stderr.\n assert \"Exit: Quitting debugger\" not in child.before.decode(\"utf8\")\n TestPDB.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_quit_with_swallowed_SystemExit_test_quit_with_swallowed_SystemExit.TestPDB_flush_child_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_quit_with_swallowed_SystemExit_test_quit_with_swallowed_SystemExit.TestPDB_flush_child_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 1054, "end_line": 1079, "span_ids": ["test_quit_with_swallowed_SystemExit"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_quit_with_swallowed_SystemExit(testdir):\n \"\"\"Test that debugging's pytest_configure is re-entrant.\"\"\"\n p1 = testdir.makepyfile(\n 
\"\"\"\n def call_pdb_set_trace():\n __import__('pdb').set_trace()\n\n\n def test_1():\n try:\n call_pdb_set_trace()\n except SystemExit:\n pass\n\n\n def test_2():\n pass\n \"\"\"\n )\n child = testdir.spawn_pytest(str(p1))\n child.expect(\"Pdb\")\n child.sendline(\"q\")\n child.expect_exact(\"Exit: Quitting debugger\")\n rest = child.read().decode(\"utf8\")\n assert \"no tests ran\" in rest\n TestPDB.flush(child)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdb_suspends_fixture_capturing_test_pdb_suspends_fixture_capturing.assert_PDB_continue_I": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdb_suspends_fixture_capturing_test_pdb_suspends_fixture_capturing.assert_PDB_continue_I", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 1082, "end_line": 1128, "span_ids": ["test_pdb_suspends_fixture_capturing"], "tokens": 371}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"fixture\", (\"capfd\", \"capsys\"))\ndef test_pdb_suspends_fixture_capturing(testdir, fixture):\n \"\"\"Using \"-s\" with pytest should suspend/resume fixture capturing.\"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n def test_inner({fixture}):\n import sys\n\n print(\"out_inner_before\")\n sys.stderr.write(\"err_inner_before\\\\n\")\n\n __import__(\"pdb\").set_trace()\n\n print(\"out_inner_after\")\n sys.stderr.write(\"err_inner_after\\\\n\")\n\n out, err = {fixture}.readouterr()\n assert out ==\"out_inner_before\\\\nout_inner_after\\\\n\"\n assert err ==\"err_inner_before\\\\nerr_inner_after\\\\n\"\n \"\"\".format(\n fixture=fixture\n )\n )\n\n child = testdir.spawn_pytest(str(p1) + \" -s\")\n\n child.expect(\"Pdb\")\n before = child.before.decode(\"utf8\")\n assert (\n \"> PDB set_trace (IO-capturing turned off for fixture %s) >\" % (fixture)\n in before\n )\n\n # Test that capturing is really suspended.\n child.sendline(\"p 40 + 2\")\n child.expect(\"Pdb\")\n assert \"\\r\\n42\\r\\n\" in child.before.decode(\"utf8\")\n\n child.sendline(\"c\")\n rest = child.read().decode(\"utf8\")\n assert \"out_inner\" not in rest\n assert \"err_inner\" not in rest\n\n TestPDB.flush(child)\n assert child.exitstatus == 0\n assert \"= 1 passed in \" in rest\n assert \"> PDB continue (IO-capturing resumed for fixture %s) >\" % (fixture) in rest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py__encoding_UTF_8_TestPytestPluginInteractions.test_addhooks_conftestplugin.assert_res_11_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py__encoding_UTF_8_TestPytestPluginInteractions.test_addhooks_conftestplugin.assert_res_11_", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1, "end_line": 47, "span_ids": ["TestPytestPluginInteractions.test_addhooks_conftestplugin", "TestPytestPluginInteractions", "docstring", "imports", "pytestpm"], "tokens": 286}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# encoding: UTF-8\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport types\n\nimport pytest\nfrom _pytest.config import PytestPluginManager\nfrom _pytest.config.exceptions import UsageError\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import Session\n\n\n@pytest.fixture\ndef pytestpm():\n return PytestPluginManager()\n\n\nclass TestPytestPluginInteractions(object):\n def test_addhooks_conftestplugin(self, testdir, _config_for_test):\n testdir.makepyfile(\n newhooks=\"\"\"\n def pytest_myhook(xyz):\n \"new hook\"\n \"\"\"\n )\n conf = testdir.makeconftest(\n \"\"\"\n import newhooks\n def pytest_addhooks(pluginmanager):\n pluginmanager.add_hookspecs(newhooks)\n def pytest_myhook(xyz):\n return xyz + 1\n \"\"\"\n )\n config = _config_for_test\n pm = config.pluginmanager\n pm.hook.pytest_addhooks.call_historic(\n kwargs=dict(pluginmanager=config.pluginmanager)\n )\n config.pluginmanager._importconftest(conf)\n # print(config.pluginmanager.get_plugins())\n res = config.hook.pytest_myhook(xyz=10)\n assert res == [11]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_addhooks_nohooks_TestPytestPluginInteractions.test_do_option_postinitialize.assert_config_option_test": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_addhooks_nohooks_TestPytestPluginInteractions.test_do_option_postinitialize.assert_config_option_test", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 49, "end_line": 72, "span_ids": ["TestPytestPluginInteractions.test_do_option_postinitialize", "TestPytestPluginInteractions.test_addhooks_nohooks"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginInteractions(object):\n\n def test_addhooks_nohooks(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import sys\n def pytest_addhooks(pluginmanager):\n pluginmanager.add_hookspecs(sys)\n \"\"\"\n )\n res = testdir.runpytest()\n assert res.ret != 0\n res.stderr.fnmatch_lines([\"*did not find*sys*\"])\n\n def test_do_option_postinitialize(self, testdir):\n config = testdir.parseconfigure()\n assert not hasattr(config.option, \"test123\")\n p = testdir.makepyfile(\n \"\"\"\n def pytest_addoption(parser):\n 
parser.addoption('--test123', action=\"store_true\",\n default=True)\n \"\"\"\n )\n config.pluginmanager._importconftest(p)\n assert config.option.test123", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_configure_TestPytestPluginInteractions.test_configure.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_configure_TestPytestPluginInteractions.test_configure.None_4", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 74, "end_line": 92, "span_ids": ["TestPytestPluginInteractions.test_configure", "TestPytestPluginInteractions.test_configure.A.pytest_configure", "TestPytestPluginInteractions.test_configure.A"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginInteractions(object):\n\n def test_configure(self, testdir):\n config = testdir.parseconfig()\n values = []\n\n class A(object):\n def pytest_configure(self, config):\n values.append(self)\n\n config.pluginmanager.register(A())\n assert len(values) == 0\n config._do_configure()\n assert len(values) == 1\n config.pluginmanager.register(A()) # leads to a configured() plugin\n assert len(values) == 2\n assert values[0] != values[1]\n\n config._ensure_unconfigure()\n config.pluginmanager.register(A())\n assert len(values) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_tracing_TestPytestPluginInteractions.test_hook_tracing.try_.finally_.undo_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_tracing_TestPytestPluginInteractions.test_hook_tracing.try_.finally_.undo_", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 94, "end_line": 125, "span_ids": ["TestPytestPluginInteractions.test_hook_tracing.api1.pytest_plugin_registered", "TestPytestPluginInteractions.test_hook_tracing.api1", "TestPytestPluginInteractions.test_hook_tracing", "TestPytestPluginInteractions.test_hook_tracing.api2", "TestPytestPluginInteractions.test_hook_tracing.api2.pytest_plugin_registered"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginInteractions(object):\n\n def test_hook_tracing(self, _config_for_test):\n pytestpm = 
_config_for_test.pluginmanager # fully initialized with plugins\n saveindent = []\n\n class api1(object):\n def pytest_plugin_registered(self):\n saveindent.append(pytestpm.trace.root.indent)\n\n class api2(object):\n def pytest_plugin_registered(self):\n saveindent.append(pytestpm.trace.root.indent)\n raise ValueError()\n\n values = []\n pytestpm.trace.root.setwriter(values.append)\n undo = pytestpm.enable_tracing()\n try:\n indent = pytestpm.trace.root.indent\n p = api1()\n pytestpm.register(p)\n assert pytestpm.trace.root.indent == indent\n assert len(values) >= 2\n assert \"pytest_plugin_registered\" in values[0]\n assert \"finish\" in values[1]\n\n values[:] = []\n with pytest.raises(ValueError):\n pytestpm.register(api2())\n assert pytestpm.trace.root.indent == indent\n assert saveindent[0] > indent\n finally:\n undo()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_proxy_TestPytestPluginInteractions.test_hook_proxy.assert_ihook_a_is_not_iho": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginInteractions.test_hook_proxy_TestPytestPluginInteractions.test_hook_proxy.assert_ihook_a_is_not_iho", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 127, "end_line": 141, "span_ids": ["TestPytestPluginInteractions.test_hook_proxy"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginInteractions(object):\n\n def test_hook_proxy(self, testdir):\n \"\"\"Test the gethookproxy function(#2016)\"\"\"\n config = testdir.parseconfig()\n session = Session(config)\n testdir.makepyfile(**{\"tests/conftest.py\": \"\", \"tests/subdir/conftest.py\": \"\"})\n\n conftest1 = testdir.tmpdir.join(\"tests/conftest.py\")\n conftest2 = testdir.tmpdir.join(\"tests/subdir/conftest.py\")\n\n config.pluginmanager._importconftest(conftest1)\n ihook_a = session.gethookproxy(testdir.tmpdir.join(\"tests\"))\n assert ihook_a is not None\n config.pluginmanager._importconftest(conftest2)\n ihook_b = session.gethookproxy(testdir.tmpdir.join(\"tests\"))\n assert ihook_a is not ihook_b", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_test_default_markers_test_importplugin_error_message.assert_in_test_traceback": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_test_default_markers_test_importplugin_error_message.assert_in_test_traceback", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 144, "end_line": 170, "span_ids": ["test_default_markers", "test_importplugin_error_message"], "tokens": 207}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_default_markers(testdir):\n result = testdir.runpytest(\"--markers\")\n result.stdout.fnmatch_lines([\"*tryfirst*first*\", \"*trylast*last*\"])\n\n\ndef test_importplugin_error_message(testdir, pytestpm):\n \"\"\"Don't hide import errors when importing plugins and provide\n an easy to debug message.\n\n See #375 and #1998.\n \"\"\"\n testdir.syspathinsert(testdir.tmpdir)\n testdir.makepyfile(\n qwe=\"\"\"\n # encoding: UTF-8\n def test_traceback():\n raise ImportError(u'Not possible to import: \u263a')\n test_traceback()\n \"\"\"\n )\n with pytest.raises(ImportError) as excinfo:\n pytestpm.import_plugin(\"qwe\")\n\n assert str(excinfo.value).endswith(\n 'Error importing plugin \"qwe\": Not possible to import: \u263a'\n )\n assert \"in test_traceback\" in str(excinfo.traceback[-1])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager_TestPytestPluginManager.test_canonical_import.assert_pm_is_registered_m": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager_TestPytestPluginManager.test_canonical_import.assert_pm_is_registered_m", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 192, "span_ids": ["TestPytestPluginManager", "TestPytestPluginManager.test_canonical_import", "TestPytestPluginManager.test_register_imported_modules"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n def test_register_imported_modules(self):\n pm = PytestPluginManager()\n mod = types.ModuleType(\"x.y.pytest_hello\")\n pm.register(mod)\n assert pm.is_registered(mod)\n values = pm.get_plugins()\n assert mod in values\n pytest.raises(ValueError, pm.register, mod)\n pytest.raises(ValueError, lambda: pm.register(mod))\n # assert not pm.is_registered(mod2)\n assert pm.get_plugins() == values\n\n def test_canonical_import(self, monkeypatch):\n mod = types.ModuleType(\"pytest_xyz\")\n monkeypatch.setitem(sys.modules, \"pytest_xyz\", mod)\n pm = PytestPluginManager()\n pm.import_plugin(\"pytest_xyz\")\n assert pm.get_plugin(\"pytest_xyz\") == mod\n assert pm.is_registered(mod)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_TestPytestPluginManager.test_consider_module.None_1": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_TestPytestPluginManager.test_consider_module.None_1", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 194, "end_line": 202, "span_ids": ["TestPytestPluginManager.test_consider_module"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_consider_module(self, testdir, pytestpm):\n testdir.syspathinsert()\n testdir.makepyfile(pytest_p1=\"#\")\n testdir.makepyfile(pytest_p2=\"#\")\n mod = types.ModuleType(\"temp\")\n mod.pytest_plugins = [\"pytest_p1\", \"pytest_p2\"]\n pytestpm.consider_module(mod)\n assert pytestpm.get_plugin(\"pytest_p1\").__name__ == \"pytest_p1\"\n assert pytestpm.get_plugin(\"pytest_p2\").__name__ == \"pytest_p2\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_import_module_TestPytestPluginManager.test_consider_module_import_module.assert_len_values_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_module_import_module_TestPytestPluginManager.test_consider_module_import_module.assert_len_values_1", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 204, "end_line": 218, "span_ids": ["TestPytestPluginManager.test_consider_module_import_module"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_consider_module_import_module(self, testdir, _config_for_test):\n pytestpm = _config_for_test.pluginmanager\n mod = types.ModuleType(\"x\")\n mod.pytest_plugins = \"pytest_a\"\n aplugin = testdir.makepyfile(pytest_a=\"#\")\n reprec = testdir.make_hook_recorder(pytestpm)\n testdir.syspathinsert(aplugin.dirpath())\n pytestpm.consider_module(mod)\n call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name)\n assert call.plugin.__name__ == \"pytest_a\"\n\n # check that it is not registered twice\n pytestpm.consider_module(mod)\n values = reprec.getcalls(\"pytest_plugin_registered\")\n assert len(values) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_fails_to_import_TestPytestPluginManager.test_plugin_skip.result_stdout_fnmatch_lin": {"__data__": 
{"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_fails_to_import_TestPytestPluginManager.test_plugin_skip.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 220, "end_line": 239, "span_ids": ["TestPytestPluginManager.test_consider_env_fails_to_import", "TestPytestPluginManager.test_plugin_skip"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_consider_env_fails_to_import(self, monkeypatch, pytestpm):\n monkeypatch.setenv(\"PYTEST_PLUGINS\", \"nonexisting\", prepend=\",\")\n with pytest.raises(ImportError):\n pytestpm.consider_env()\n\n @pytest.mark.filterwarnings(\"always\")\n def test_plugin_skip(self, testdir, monkeypatch):\n p = testdir.makepyfile(\n skipping1=\"\"\"\n import pytest\n pytest.skip(\"hello\", allow_module_level=True)\n \"\"\"\n )\n p.copy(p.dirpath(\"skipping2.py\"))\n monkeypatch.setenv(\"PYTEST_PLUGINS\", \"skipping2\")\n result = testdir.runpytest(\"-rw\", \"-p\", \"skipping1\", syspathinsert=True)\n assert result.ret == EXIT_NOTESTSCOLLECTED\n result.stdout.fnmatch_lines(\n [\"*skipped plugin*skipping1*hello*\", \"*skipped plugin*skipping2*hello*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_plugin_instantiation_TestPytestPluginManager.test_consider_env_plugin_instantiation.assert_l2_l3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_consider_env_plugin_instantiation_TestPytestPluginManager.test_consider_env_plugin_instantiation.assert_l2_l3", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 252, "span_ids": ["TestPytestPluginManager.test_consider_env_plugin_instantiation"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm):\n testdir.syspathinsert()\n testdir.makepyfile(xy123=\"#\")\n monkeypatch.setitem(os.environ, \"PYTEST_PLUGINS\", \"xy123\")\n l1 = len(pytestpm.get_plugins())\n pytestpm.consider_env()\n l2 = len(pytestpm.get_plugins())\n assert l2 == l1 + 1\n assert pytestpm.get_plugin(\"xy123\")\n pytestpm.consider_env()\n l3 = len(pytestpm.get_plugins())\n assert l2 == l3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_pluginmanager_ENV_startup_TestPytestPluginManager.test_pluginmanager_ENV_startup.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_pluginmanager_ENV_startup_TestPytestPluginManager.test_pluginmanager_ENV_startup.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 254, "end_line": 267, "span_ids": ["TestPytestPluginManager.test_pluginmanager_ENV_startup"], "tokens": 136}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_pluginmanager_ENV_startup(self, testdir, monkeypatch):\n testdir.makepyfile(pytest_x500=\"#\")\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_hello(pytestconfig):\n plugin = pytestconfig.pluginmanager.get_plugin('pytest_x500')\n assert plugin is not None\n \"\"\"\n )\n monkeypatch.setenv(\"PYTEST_PLUGINS\", \"pytest_x500\", prepend=\",\")\n result = testdir.runpytest(p, syspathinsert=True)\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_importname_TestPytestPluginManager.test_import_plugin_importname.assert_plugin2_is_plugin1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_importname_TestPytestPluginManager.test_import_plugin_importname.assert_plugin2_is_plugin1", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 284, "span_ids": ["TestPytestPluginManager.test_import_plugin_importname"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_import_plugin_importname(self, testdir, pytestpm):\n pytest.raises(ImportError, pytestpm.import_plugin, \"qweqwex.y\")\n pytest.raises(ImportError, pytestpm.import_plugin, \"pytest_qweqwx.y\")\n\n testdir.syspathinsert()\n pluginname = \"pytest_hello\"\n testdir.makepyfile(**{pluginname: \"\"})\n pytestpm.import_plugin(\"pytest_hello\")\n len1 = len(pytestpm.get_plugins())\n pytestpm.import_plugin(\"pytest_hello\")\n len2 = len(pytestpm.get_plugins())\n assert len1 == len2\n plugin1 = pytestpm.get_plugin(\"pytest_hello\")\n assert plugin1.__name__.endswith(\"pytest_hello\")\n plugin2 = 
pytestpm.get_plugin(\"pytest_hello\")\n assert plugin2 is plugin1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_dotted_name_TestPytestPluginManager.test_consider_conftest_deps.with_pytest_raises_Import.pytestpm_consider_conftes": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManager.test_import_plugin_dotted_name_TestPytestPluginManager.test_consider_conftest_deps.with_pytest_raises_Import.pytestpm_consider_conftes", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 286, "end_line": 300, "span_ids": ["TestPytestPluginManager.test_consider_conftest_deps", "TestPytestPluginManager.test_import_plugin_dotted_name"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManager(object):\n\n def test_import_plugin_dotted_name(self, testdir, pytestpm):\n pytest.raises(ImportError, pytestpm.import_plugin, \"qweqwex.y\")\n pytest.raises(ImportError, pytestpm.import_plugin, \"pytest_qweqwex.y\")\n\n testdir.syspathinsert()\n testdir.mkpydir(\"pkg\").join(\"plug.py\").write(\"x=3\")\n pluginname = \"pkg.plug\"\n pytestpm.import_plugin(pluginname)\n mod = pytestpm.get_plugin(\"pkg.plug\")\n assert mod.x == 3\n\n def test_consider_conftest_deps(self, testdir, pytestpm):\n mod = testdir.makepyfile(\"pytest_plugins='xyz'\").pyimport()\n with pytest.raises(ImportError):\n pytestpm.consider_conftest(mod)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming_TestPytestPluginManagerBootstrapming.test_preparse_args.with_pytest_raises_UsageE.pytestpm_consider_prepars": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming_TestPytestPluginManagerBootstrapming.test_preparse_args.with_pytest_raises_UsageE.pytestpm_consider_prepars", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 303, "end_line": 319, "span_ids": ["TestPytestPluginManagerBootstrapming.test_preparse_args", "TestPytestPluginManagerBootstrapming"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManagerBootstrapming(object):\n def test_preparse_args(self, pytestpm):\n pytest.raises(\n ImportError, lambda: pytestpm.consider_preparse([\"xyz\", \"-p\", 
\"hello123\"])\n )\n\n # Handles -p without space (#3532).\n with pytest.raises(ImportError) as excinfo:\n pytestpm.consider_preparse([\"-phello123\"])\n assert '\"hello123\"' in excinfo.value.args[0]\n pytestpm.consider_preparse([\"-pno:hello123\"])\n\n # Handles -p without following arg (when used without argparse).\n pytestpm.consider_preparse([\"-p\"])\n\n with pytest.raises(UsageError, match=\"^plugin main cannot be disabled$\"):\n pytestpm.consider_preparse([\"-p\", \"no:main\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_unregistered_alredy_registered.assert_42_not_in_l2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_unregistered_alredy_registered.assert_42_not_in_l2", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 335, "span_ids": ["TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_unregistered_alredy_registered", "TestPytestPluginManagerBootstrapming.test_plugin_prevent_register"], "tokens": 170}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManagerBootstrapming(object):\n\n def test_plugin_prevent_register(self, pytestpm):\n pytestpm.consider_preparse([\"xyz\", \"-p\", \"no:abc\"])\n l1 = pytestpm.get_plugins()\n pytestpm.register(42, name=\"abc\")\n l2 = pytestpm.get_plugins()\n assert len(l2) == len(l1)\n assert 42 not in l2\n\n def test_plugin_prevent_register_unregistered_alredy_registered(self, pytestpm):\n pytestpm.register(42, name=\"abc\")\n l1 = pytestpm.get_plugins()\n assert 42 in l1\n pytestpm.consider_preparse([\"xyz\", \"-p\", \"no:abc\"])\n l2 = pytestpm.get_plugins()\n assert 42 not in l2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_stepwise_on_cacheprovider_unregister_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pluginmanager.py_TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_stepwise_on_cacheprovider_unregister_", "embedding": null, "metadata": {"file_path": "testing/test_pluginmanager.py", "file_name": "test_pluginmanager.py", "file_type": "text/x-python", "category": "test", "start_line": 337, "end_line": 361, "span_ids": ["TestPytestPluginManagerBootstrapming.test_plugin_prevent_register_stepwise_on_cacheprovider_unregister", "TestPytestPluginManagerBootstrapming.test_blocked_plugin_can_be_used"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestPytestPluginManagerBootstrapming(object):\n\n def test_plugin_prevent_register_stepwise_on_cacheprovider_unregister(\n self, pytestpm\n ):\n \"\"\" From PR #4304 : The only way to unregister a module is documented at\n the end of https://docs.pytest.org/en/latest/plugins.html.\n\n When unregister cacheprovider, then unregister stepwise too\n \"\"\"\n pytestpm.register(42, name=\"cacheprovider\")\n pytestpm.register(43, name=\"stepwise\")\n l1 = pytestpm.get_plugins()\n assert 42 in l1\n assert 43 in l1\n pytestpm.consider_preparse([\"xyz\", \"-p\", \"no:cacheprovider\"])\n l2 = pytestpm.get_plugins()\n assert 42 not in l2\n assert 43 not in l2\n\n def test_blocked_plugin_can_be_used(self, pytestpm):\n pytestpm.consider_preparse([\"xyz\", \"-p\", \"no:abc\", \"-p\", \"abc\"])\n\n assert pytestpm.has_plugin(\"abc\")\n assert not pytestpm.is_blocked(\"abc\")\n assert not pytestpm.is_blocked(\"pytest_abc\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py__coding_utf_8__None_18": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py__coding_utf_8__None_18", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 23, "span_ids": ["imports", "docstring"], "tokens": 146}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport subprocess\nimport sys\nimport time\n\nimport py.path\n\nimport _pytest.pytester as pytester\nimport pytest\nfrom _pytest.config import PytestPluginManager\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import EXIT_OK\nfrom _pytest.main import EXIT_TESTSFAILED\nfrom _pytest.pytester import CwdSnapshot\nfrom _pytest.pytester import HookRecorder\nfrom _pytest.pytester import LineMatcher\nfrom _pytest.pytester import SysModulesSnapshot\nfrom _pytest.pytester import SysPathsSnapshot", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_make_hook_recorder_test_make_hook_recorder.pytest_raises_ValueError_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_make_hook_recorder_test_make_hook_recorder.pytest_raises_ValueError_", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 26, "end_line": 76, "span_ids": 
["test_make_hook_recorder.rep_1:2", "test_make_hook_recorder", "test_make_hook_recorder.rep_1", "test_make_hook_recorder.rep", "test_make_hook_recorder.rep:2"], "tokens": 365}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_make_hook_recorder(testdir):\n item = testdir.getitem(\"def test_func(): pass\")\n recorder = testdir.make_hook_recorder(item.config.pluginmanager)\n assert not recorder.getfailures()\n\n pytest.xfail(\"internal reportrecorder tests need refactoring\")\n\n class rep(object):\n excinfo = None\n passed = False\n failed = True\n skipped = False\n when = \"call\"\n\n recorder.hook.pytest_runtest_logreport(report=rep)\n failures = recorder.getfailures()\n assert failures == [rep]\n failures = recorder.getfailures()\n assert failures == [rep]\n\n class rep(object):\n excinfo = None\n passed = False\n failed = False\n skipped = True\n when = \"call\"\n\n rep.passed = False\n rep.skipped = True\n recorder.hook.pytest_runtest_logreport(report=rep)\n\n modcol = testdir.getmodulecol(\"\")\n rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)\n rep.passed = False\n rep.failed = True\n rep.skipped = False\n recorder.hook.pytest_collectreport(report=rep)\n\n passed, skipped, failed = recorder.listoutcomes()\n assert not passed and skipped and failed\n\n numpassed, numskipped, numfailed = recorder.countoutcomes()\n assert numpassed == 0\n assert numskipped == 1\n assert numfailed == 1\n assert len(recorder.getfailedcollections()) == 1\n\n recorder.unregister()\n recorder.clear()\n recorder.hook.pytest_runtest_logreport(report=rep)\n pytest.raises(ValueError, recorder.getfailures)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_parseconfig_make_holder.return.apiclass_apimod": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_parseconfig_make_holder.return.apiclass_apimod", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 79, "end_line": 178, "span_ids": ["make_holder", "test_runresult_assertion_on_xfail", "test_runresult_assertion_on_xpassed", "make_holder.apiclass", "test_testdir_runs_with_plugin", "test_runresult_repr", "test_xpassed_with_strict_is_considered_a_failure", "make_holder.apiclass.pytest_xyz", "test_parseconfig"], "tokens": 521}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parseconfig(testdir):\n config1 = testdir.parseconfig()\n config2 = testdir.parseconfig()\n assert config2 != config1\n assert config1 != pytest.config\n\n\ndef test_testdir_runs_with_plugin(testdir):\n testdir.makepyfile(\n \"\"\"\n pytest_plugins = \"pytester\"\n def test_hello(testdir):\n assert 1\n \"\"\"\n )\n result = testdir.runpytest()\n 
result.assert_outcomes(passed=1)\n\n\ndef test_runresult_assertion_on_xfail(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytest_plugins = \"pytester\"\n\n @pytest.mark.xfail\n def test_potato():\n assert False\n \"\"\"\n )\n result = testdir.runpytest()\n result.assert_outcomes(xfailed=1)\n assert result.ret == 0\n\n\ndef test_runresult_assertion_on_xpassed(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytest_plugins = \"pytester\"\n\n @pytest.mark.xfail\n def test_potato():\n assert True\n \"\"\"\n )\n result = testdir.runpytest()\n result.assert_outcomes(xpassed=1)\n assert result.ret == 0\n\n\ndef test_runresult_repr():\n from _pytest.pytester import RunResult\n\n assert (\n repr(\n RunResult(ret=\"ret\", outlines=[\"\"], errlines=[\"some\", \"errors\"], duration=1)\n )\n == \"\"\n )\n\n\ndef test_xpassed_with_strict_is_considered_a_failure(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n pytest_plugins = \"pytester\"\n\n @pytest.mark.xfail(strict=True)\n def test_potato():\n assert True\n \"\"\"\n )\n result = testdir.runpytest()\n result.assert_outcomes(failed=1)\n assert result.ret != 0\n\n\ndef make_holder():\n class apiclass(object):\n def pytest_xyz(self, arg):\n \"x\"\n\n def pytest_xyz_noarg(self):\n \"x\"\n\n apimod = type(os)(\"api\")\n\n def pytest_xyz(arg):\n \"x\"\n\n def pytest_xyz_noarg():\n \"x\"\n\n apimod.pytest_xyz = pytest_xyz\n apimod.pytest_xyz_noarg = pytest_xyz_noarg\n return apiclass, apimod", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_hookrecorder_basic_test_hookrecorder_basic.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_hookrecorder_basic_test_hookrecorder_basic.None_2", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 193, "span_ids": ["test_hookrecorder_basic"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"holder\", make_holder())\ndef test_hookrecorder_basic(holder):\n pm = PytestPluginManager()\n pm.add_hookspecs(holder)\n rec = HookRecorder(pm)\n pm.hook.pytest_xyz(arg=123)\n call = rec.popcall(\"pytest_xyz\")\n assert call.arg == 123\n assert call._name == \"pytest_xyz\"\n pytest.raises(pytest.fail.Exception, rec.popcall, \"abc\")\n pm.hook.pytest_xyz_noarg()\n call = rec.popcall(\"pytest_xyz_noarg\")\n assert call._name == \"pytest_xyz_noarg\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_makepyfile_unicode_test_makepyfile_utf8.assert_u_mixed_encoding_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_makepyfile_unicode_test_makepyfile_utf8.assert_u_mixed_encoding_", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", 
"file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 196, "end_line": 214, "span_ids": ["test_makepyfile_utf8", "test_makepyfile_unicode"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_makepyfile_unicode(testdir):\n global unichr\n try:\n unichr(65)\n except NameError:\n unichr = chr\n testdir.makepyfile(unichr(0xFFFD))\n\n\ndef test_makepyfile_utf8(testdir):\n \"\"\"Ensure makepyfile accepts utf-8 bytes as input (#2738)\"\"\"\n utf8_contents = u\"\"\"\n def setup_function(function):\n mixed_encoding = u'S\u00e3o Paulo'\n \"\"\".encode(\n \"utf-8\"\n )\n p = testdir.makepyfile(utf8_contents)\n assert u\"mixed_encoding = u'S\u00e3o Paulo'\".encode(\"utf-8\") in p.read(\"rb\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup_TestInlineRunModulesCleanup.spy_factory.return.SysModulesSnapshotSpy": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup_TestInlineRunModulesCleanup.spy_factory.return.SysModulesSnapshotSpy", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 217, "end_line": 241, "span_ids": ["TestInlineRunModulesCleanup.spy_factory.SysModulesSnapshotSpy:2", "TestInlineRunModulesCleanup", "TestInlineRunModulesCleanup.test_inline_run_test_module_not_cleaned_up", "TestInlineRunModulesCleanup.spy_factory.SysModulesSnapshotSpy", "TestInlineRunModulesCleanup.spy_factory"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInlineRunModulesCleanup(object):\n def test_inline_run_test_module_not_cleaned_up(self, testdir):\n test_mod = testdir.makepyfile(\"def test_foo(): assert True\")\n result = testdir.inline_run(str(test_mod))\n assert result.ret == EXIT_OK\n # rewrite module, now test should fail if module was re-imported\n test_mod.write(\"def test_foo(): assert False\")\n result2 = testdir.inline_run(str(test_mod))\n assert result2.ret == EXIT_TESTSFAILED\n\n def spy_factory(self):\n class SysModulesSnapshotSpy(object):\n instances = []\n\n def __init__(self, preserve=None):\n SysModulesSnapshotSpy.instances.append(self)\n self._spy_restore_count = 0\n self._spy_preserve = preserve\n self.__snapshot = SysModulesSnapshot(preserve=preserve)\n\n def restore(self):\n self._spy_restore_count += 1\n return self.__snapshot.restore()\n\n return SysModulesSnapshotSpy", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot.assert_all_sys_modules_x_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot_TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot.assert_all_sys_modules_x_", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 243, "end_line": 262, "span_ids": ["TestInlineRunModulesCleanup.test_inline_run_taking_and_restoring_a_sys_modules_snapshot"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInlineRunModulesCleanup(object):\n\n def test_inline_run_taking_and_restoring_a_sys_modules_snapshot(\n self, testdir, monkeypatch\n ):\n spy_factory = self.spy_factory()\n monkeypatch.setattr(pytester, \"SysModulesSnapshot\", spy_factory)\n original = dict(sys.modules)\n testdir.syspathinsert()\n testdir.makepyfile(import1=\"# you son of a silly person\")\n testdir.makepyfile(import2=\"# my hovercraft is full of eels\")\n test_mod = testdir.makepyfile(\n \"\"\"\n import import1\n def test_foo(): import import2\"\"\"\n )\n testdir.inline_run(str(test_mod))\n assert len(spy_factory.instances) == 1\n spy = spy_factory.instances[0]\n assert spy._spy_restore_count == 1\n assert sys.modules == original\n assert all(sys.modules[x] is original[x] for x in sys.modules)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_sys_modules_snapshot_restore_preserving_modules_TestInlineRunModulesCleanup.test_external_test_module_imports_not_cleaned_up.assert_imported_data_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestInlineRunModulesCleanup.test_inline_run_sys_modules_snapshot_restore_preserving_modules_TestInlineRunModulesCleanup.test_external_test_module_imports_not_cleaned_up.assert_imported_data_4", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 264, "end_line": 289, "span_ids": ["TestInlineRunModulesCleanup.test_external_test_module_imports_not_cleaned_up", "TestInlineRunModulesCleanup.test_inline_run_sys_modules_snapshot_restore_preserving_modules"], "tokens": 234}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestInlineRunModulesCleanup(object):\n\n def 
test_inline_run_sys_modules_snapshot_restore_preserving_modules(\n self, testdir, monkeypatch\n ):\n spy_factory = self.spy_factory()\n monkeypatch.setattr(pytester, \"SysModulesSnapshot\", spy_factory)\n test_mod = testdir.makepyfile(\"def test_foo(): pass\")\n testdir.inline_run(str(test_mod))\n spy = spy_factory.instances[0]\n assert not spy._spy_preserve(\"black_knight\")\n assert spy._spy_preserve(\"zope\")\n assert spy._spy_preserve(\"zope.interface\")\n assert spy._spy_preserve(\"zopelicious\")\n\n def test_external_test_module_imports_not_cleaned_up(self, testdir):\n testdir.syspathinsert()\n testdir.makepyfile(imported=\"data = 'you son of a silly person'\")\n import imported\n\n test_mod = testdir.makepyfile(\n \"\"\"\n def test_foo():\n import imported\n imported.data = 42\"\"\"\n )\n testdir.inline_run(str(test_mod))\n assert imported.data == 42", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_assert_outcomes_after_pytest_error_test_cwd_snapshot.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_assert_outcomes_after_pytest_error_test_cwd_snapshot.None_1", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 292, "end_line": 308, "span_ids": ["test_cwd_snapshot", "test_assert_outcomes_after_pytest_error"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_assert_outcomes_after_pytest_error(testdir):\n testdir.makepyfile(\"def test_foo(): assert True\")\n\n result = testdir.runpytest(\"--unexpected-argument\")\n with pytest.raises(ValueError, match=\"Pytest terminal report not found\"):\n result.assert_outcomes(passed=0)\n\n\ndef test_cwd_snapshot(tmpdir):\n foo = tmpdir.ensure(\"foo\", dir=1)\n bar = tmpdir.ensure(\"bar\", dir=1)\n foo.chdir()\n snapshot = CwdSnapshot()\n bar.chdir()\n assert py.path.local() == bar\n snapshot.restore()\n assert py.path.local() == foo", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot_TestSysModulesSnapshot.test_restore_reloaded.assert_sys_modules_ori": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot_TestSysModulesSnapshot.test_restore_reloaded.assert_sys_modules_ori", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 311, "end_line": 342, "span_ids": ["TestSysModulesSnapshot.test_remove_added", "TestSysModulesSnapshot.test_add_removed", "TestSysModulesSnapshot.test_restore_reloaded", "TestSysModulesSnapshot"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", 
"tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSysModulesSnapshot(object):\n key = \"my-test-module\"\n\n def test_remove_added(self):\n original = dict(sys.modules)\n assert self.key not in sys.modules\n snapshot = SysModulesSnapshot()\n sys.modules[self.key] = \"something\"\n assert self.key in sys.modules\n snapshot.restore()\n assert sys.modules == original\n\n def test_add_removed(self, monkeypatch):\n assert self.key not in sys.modules\n monkeypatch.setitem(sys.modules, self.key, \"something\")\n assert self.key in sys.modules\n original = dict(sys.modules)\n snapshot = SysModulesSnapshot()\n del sys.modules[self.key]\n assert self.key not in sys.modules\n snapshot.restore()\n assert sys.modules == original\n\n def test_restore_reloaded(self, monkeypatch):\n assert self.key not in sys.modules\n monkeypatch.setitem(sys.modules, self.key, \"something\")\n assert self.key in sys.modules\n original = dict(sys.modules)\n snapshot = SysModulesSnapshot()\n sys.modules[self.key] = \"something else\"\n snapshot.restore()\n assert sys.modules == original", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot.test_preserve_modules_TestSysModulesSnapshot.test_preserve_container.assert_sys_modules_ori": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysModulesSnapshot.test_preserve_modules_TestSysModulesSnapshot.test_preserve_container.assert_sys_modules_ori", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 344, "end_line": 370, "span_ids": ["TestSysModulesSnapshot.test_preserve_modules", "TestSysModulesSnapshot.test_preserve_container"], "tokens": 258}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSysModulesSnapshot(object):\n\n def test_preserve_modules(self, monkeypatch):\n key = [self.key + str(i) for i in range(3)]\n assert not any(k in sys.modules for k in key)\n for i, k in enumerate(key):\n monkeypatch.setitem(sys.modules, k, \"something\" + str(i))\n original = dict(sys.modules)\n\n def preserve(name):\n return name in (key[0], key[1], \"some-other-key\")\n\n snapshot = SysModulesSnapshot(preserve=preserve)\n sys.modules[key[0]] = original[key[0]] = \"something else0\"\n sys.modules[key[1]] = original[key[1]] = \"something else1\"\n sys.modules[key[2]] = \"something else2\"\n snapshot.restore()\n assert sys.modules == original\n\n def test_preserve_container(self, monkeypatch):\n original = dict(sys.modules)\n assert self.key not in original\n replacement = dict(sys.modules)\n replacement[self.key] = \"life of brian\"\n snapshot = SysModulesSnapshot()\n monkeypatch.setattr(sys, \"modules\", replacement)\n snapshot.restore()\n assert sys.modules is replacement\n assert sys.modules == original", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot_TestSysPathsSnapshot.test_restore.assert_getattr_sys_other": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot_TestSysPathsSnapshot.test_restore.assert_getattr_sys_other", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 402, "span_ids": ["TestSysPathsSnapshot.test_restore", "TestSysPathsSnapshot.path", "TestSysPathsSnapshot"], "tokens": 343}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"path_type\", (\"path\", \"meta_path\"))\nclass TestSysPathsSnapshot(object):\n other_path = {\"path\": \"meta_path\", \"meta_path\": \"path\"}\n\n @staticmethod\n def path(n):\n return \"my-dirty-little-secret-\" + str(n)\n\n def test_restore(self, monkeypatch, path_type):\n other_path_type = self.other_path[path_type]\n for i in range(10):\n assert self.path(i) not in getattr(sys, path_type)\n sys_path = [self.path(i) for i in range(6)]\n monkeypatch.setattr(sys, path_type, sys_path)\n original = list(sys_path)\n original_other = list(getattr(sys, other_path_type))\n snapshot = SysPathsSnapshot()\n transformation = {\"source\": (0, 1, 2, 3, 4, 5), \"target\": (6, 2, 9, 7, 5, 8)}\n assert sys_path == [self.path(x) for x in transformation[\"source\"]]\n sys_path[1] = self.path(6)\n sys_path[3] = self.path(7)\n sys_path.append(self.path(8))\n del sys_path[4]\n sys_path[3:3] = [self.path(9)]\n del sys_path[0]\n assert sys_path == [self.path(x) for x in transformation[\"target\"]]\n snapshot.restore()\n assert getattr(sys, path_type) is sys_path\n assert getattr(sys, path_type) == original\n assert getattr(sys, other_path_type) == original_other", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot.test_preserve_container_TestSysPathsSnapshot.test_preserve_container.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_TestSysPathsSnapshot.test_preserve_container_TestSysPathsSnapshot.test_preserve_container.None_3", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 404, "end_line": 416, "span_ids": ["TestSysPathsSnapshot.test_preserve_container"], "tokens": 154}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"path_type\", (\"path\", \"meta_path\"))\nclass TestSysPathsSnapshot(object):\n\n def test_preserve_container(self, monkeypatch, path_type):\n other_path_type 
= self.other_path[path_type]\n original_data = list(getattr(sys, path_type))\n original_other = getattr(sys, other_path_type)\n original_other_data = list(original_other)\n new = []\n snapshot = SysPathsSnapshot()\n monkeypatch.setattr(sys, path_type, new)\n snapshot.restore()\n assert getattr(sys, path_type) is new\n assert getattr(sys, path_type) == original_data\n assert getattr(sys, other_path_type) is original_other\n assert getattr(sys, other_path_type) == original_other_data", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_testdir_subprocess_test_pytester_addopts.assert_os_environ_PYTEST": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_testdir_subprocess_test_pytester_addopts.assert_os_environ_PYTEST", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 419, "end_line": 485, "span_ids": ["test_testdir_run_no_timeout", "test_unicode_args", "test_pytester_addopts", "test_testdir_run_timeout_expires", "test_linematcher_with_nonlist", "test_testdir_subprocess", "test_testdir_run_with_timeout"], "tokens": 446}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_testdir_subprocess(testdir):\n testfile = testdir.makepyfile(\"def test_one(): pass\")\n assert testdir.runpytest_subprocess(testfile).ret == 0\n\n\ndef test_unicode_args(testdir):\n result = testdir.runpytest(\"-k\", u\"\ud83d\udca9\")\n assert result.ret == EXIT_NOTESTSCOLLECTED\n\n\ndef test_testdir_run_no_timeout(testdir):\n testfile = testdir.makepyfile(\"def test_no_timeout(): pass\")\n assert testdir.runpytest_subprocess(testfile).ret == EXIT_OK\n\n\ndef test_testdir_run_with_timeout(testdir):\n testfile = testdir.makepyfile(\"def test_no_timeout(): pass\")\n\n timeout = 120\n\n start = time.time()\n result = testdir.runpytest_subprocess(testfile, timeout=timeout)\n end = time.time()\n duration = end - start\n\n assert result.ret == EXIT_OK\n assert duration < timeout\n\n\ndef test_testdir_run_timeout_expires(testdir):\n testfile = testdir.makepyfile(\n \"\"\"\n import time\n\n def test_timeout():\n time.sleep(10)\"\"\"\n )\n with pytest.raises(testdir.TimeoutExpired):\n testdir.runpytest_subprocess(testfile, timeout=1)\n\n\ndef test_linematcher_with_nonlist():\n \"\"\"Test LineMatcher with regard to passing in a set (accidentally).\"\"\"\n lm = LineMatcher([])\n\n with pytest.raises(AssertionError):\n lm.fnmatch_lines(set())\n with pytest.raises(AssertionError):\n lm.fnmatch_lines({})\n lm.fnmatch_lines([])\n lm.fnmatch_lines(())\n\n assert lm._getlines({}) == {}\n assert lm._getlines(set()) == set()\n\n\ndef test_pytester_addopts(request, monkeypatch):\n monkeypatch.setenv(\"PYTEST_ADDOPTS\", \"--orig-unused\")\n\n testdir = request.getfixturevalue(\"testdir\")\n\n try:\n assert \"PYTEST_ADDOPTS\" not in os.environ\n finally:\n testdir.finalize()\n\n assert os.environ[\"PYTEST_ADDOPTS\"] == \"--orig-unused\"", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_run_stdin_test_run_stdin.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_run_stdin_test_run_stdin.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 488, "end_line": 515, "span_ids": ["test_run_stdin"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_run_stdin(testdir):\n with pytest.raises(testdir.TimeoutExpired):\n testdir.run(\n sys.executable,\n \"-c\",\n \"import sys, time; time.sleep(1); print(sys.stdin.read())\",\n stdin=subprocess.PIPE,\n timeout=0.1,\n )\n\n with pytest.raises(testdir.TimeoutExpired):\n result = testdir.run(\n sys.executable,\n \"-c\",\n \"import sys, time; time.sleep(1); print(sys.stdin.read())\",\n stdin=b\"input\\n2ndline\",\n timeout=0.1,\n )\n\n result = testdir.run(\n sys.executable,\n \"-c\",\n \"import sys; print(sys.stdin.read())\",\n stdin=b\"input\\n2ndline\",\n )\n assert result.stdout.lines == [\"input\", \"2ndline\"]\n assert result.stderr.str() == \"\"\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_stdin_pipe_test_popen_stdin_bytes.assert_proc_returncode_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_stdin_pipe_test_popen_stdin_bytes.assert_proc_returncode_", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 518, "end_line": 542, "span_ids": ["test_popen_stdin_bytes", "test_popen_stdin_pipe"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_popen_stdin_pipe(testdir):\n proc = testdir.popen(\n [sys.executable, \"-c\", \"import sys; print(sys.stdin.read())\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE,\n )\n stdin = b\"input\\n2ndline\"\n stdout, stderr = proc.communicate(input=stdin)\n assert stdout.decode(\"utf8\").splitlines() == [\"input\", \"2ndline\"]\n assert stderr == b\"\"\n assert proc.returncode == 0\n\n\ndef test_popen_stdin_bytes(testdir):\n proc = testdir.popen(\n [sys.executable, \"-c\", \"import sys; print(sys.stdin.read())\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=b\"input\\n2ndline\",\n )\n stdout, stderr = proc.communicate()\n assert stdout.decode(\"utf8\").splitlines() == [\"input\", \"2ndline\"]\n assert stderr == b\"\"\n assert proc.returncode == 0", "start_char_idx": null, 
"end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_default_stdin_stderr_and_stdin_None_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pytester.py_test_popen_default_stdin_stderr_and_stdin_None_", "embedding": null, "metadata": {"file_path": "testing/test_pytester.py", "file_name": "test_pytester.py", "file_type": "text/x-python", "category": "test", "start_line": 545, "end_line": 562, "span_ids": ["test_popen_default_stdin_stderr_and_stdin_None"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_popen_default_stdin_stderr_and_stdin_None(testdir):\n # stdout, stderr default to pipes,\n # stdin can be None to not close the pipe, avoiding\n # \"ValueError: flush of closed file\" with `communicate()`.\n p1 = testdir.makepyfile(\n \"\"\"\n import sys\n print(sys.stdin.read()) # empty\n print('stdout')\n sys.stderr.write('stderr')\n \"\"\"\n )\n proc = testdir.popen([sys.executable, str(p1)], stdin=None)\n stdout, stderr = proc.communicate(b\"ignored\")\n assert stdout.splitlines() == [b\"\", b\"stdout\"]\n assert stderr.splitlines() == [b\"stderr\"]\n assert proc.returncode == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_from___future___import_ab_test_recwarn_functional.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_from___future___import_ab_test_recwarn_functional.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["test_recwarn_stacklevel", "imports", "test_recwarn_functional"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nimport warnings\n\nimport pytest\nfrom _pytest.recwarn import WarningsRecorder\nfrom _pytest.warning_types import PytestDeprecationWarning\n\n\ndef test_recwarn_stacklevel(recwarn):\n warnings.warn(\"hello\")\n warn = recwarn.pop()\n assert warn.filename == __file__\n\n\ndef test_recwarn_functional(testdir):\n testdir.makepyfile(\n \"\"\"\n import warnings\n def test_method(recwarn):\n warnings.warn(\"hello\")\n warn = recwarn.pop()\n assert isinstance(warn.message, UserWarning)\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", 
"metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker_TestWarningsRecorderChecker.test_recording.with_rec_.pytest_raises_AssertionEr": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker_TestWarningsRecorderChecker.test_recording.with_rec_.pytest_raises_AssertionEr", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 33, "end_line": 48, "span_ids": ["TestWarningsRecorderChecker.test_recording", "TestWarningsRecorderChecker"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarningsRecorderChecker(object):\n def test_recording(self):\n rec = WarningsRecorder()\n with rec:\n assert not rec.list\n warnings.warn_explicit(\"hello\", UserWarning, \"xyz\", 13)\n assert len(rec.list) == 1\n warnings.warn(DeprecationWarning(\"hello\"))\n assert len(rec.list) == 2\n warn = rec.pop()\n assert str(warn.message) == \"hello\"\n values = rec.list\n rec.clear()\n assert len(rec.list) == 0\n assert values is rec.list\n pytest.raises(AssertionError, rec.pop)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker.test_warn_stacklevel_TestWarningsRecorderChecker.test_invalid_enter_exit.with_WarningsRecorder_.None_1.with_rec_.with_rec_._can_t_enter_twice": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarningsRecorderChecker.test_warn_stacklevel_TestWarningsRecorderChecker.test_invalid_enter_exit.with_WarningsRecorder_.None_1.with_rec_.with_rec_._can_t_enter_twice", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 50, "end_line": 77, "span_ids": ["TestWarningsRecorderChecker.test_invalid_enter_exit", "TestWarningsRecorderChecker.test_typechecking", "TestWarningsRecorderChecker.test_warn_stacklevel"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarningsRecorderChecker(object):\n\n @pytest.mark.issue(4243)\n def test_warn_stacklevel(self):\n rec = WarningsRecorder()\n with rec:\n warnings.warn(\"test\", DeprecationWarning, 2)\n\n def test_typechecking(self):\n from _pytest.recwarn import WarningsChecker\n\n with pytest.raises(TypeError):\n WarningsChecker(5)\n with pytest.raises(TypeError):\n WarningsChecker((\"hi\", RuntimeWarning))\n with pytest.raises(TypeError):\n WarningsChecker([DeprecationWarning, RuntimeWarning])\n\n def test_invalid_enter_exit(self):\n # wrap this test in WarningsRecorder to ensure warning state gets reset\n with WarningsRecorder():\n with 
pytest.raises(RuntimeError):\n rec = WarningsRecorder()\n rec.__exit__(None, None, None) # can't exit before entering\n\n with pytest.raises(RuntimeError):\n rec = WarningsRecorder()\n with rec:\n with rec:\n pass # can't enter twice", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall_TestDeprecatedCall.test_deprecated_explicit_call.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall_TestDeprecatedCall.test_deprecated_explicit_call.None_1", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 80, "end_line": 123, "span_ids": ["TestDeprecatedCall.test_deprecated_call_raises", "TestDeprecatedCall.dep", "TestDeprecatedCall.test_deprecated_call", "TestDeprecatedCall.dep_explicit", "TestDeprecatedCall.test_deprecated_call_ret", "TestDeprecatedCall.test_deprecated_explicit_call_raises", "TestDeprecatedCall.test_deprecated_explicit_call", "TestDeprecatedCall.test_deprecated_call_preserves", "TestDeprecatedCall"], "tokens": 354}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecatedCall(object):\n \"\"\"test pytest.deprecated_call()\"\"\"\n\n def dep(self, i, j=None):\n if i == 0:\n warnings.warn(\"is deprecated\", DeprecationWarning, stacklevel=1)\n return 42\n\n def dep_explicit(self, i):\n if i == 0:\n warnings.warn_explicit(\n \"dep_explicit\", category=DeprecationWarning, filename=\"hello\", lineno=3\n )\n\n def test_deprecated_call_raises(self):\n with pytest.raises(pytest.fail.Exception, match=\"No warnings of type\"):\n pytest.deprecated_call(self.dep, 3, 5)\n\n def test_deprecated_call(self):\n pytest.deprecated_call(self.dep, 0, 5)\n\n def test_deprecated_call_ret(self):\n ret = pytest.deprecated_call(self.dep, 0)\n assert ret == 42\n\n def test_deprecated_call_preserves(self):\n onceregistry = warnings.onceregistry.copy()\n filters = warnings.filters[:]\n warn = warnings.warn\n warn_explicit = warnings.warn_explicit\n self.test_deprecated_call_raises()\n self.test_deprecated_call()\n assert onceregistry == warnings.onceregistry\n assert filters == warnings.filters\n assert warn is warnings.warn\n assert warn_explicit is warnings.warn_explicit\n\n def test_deprecated_explicit_call_raises(self):\n with pytest.raises(pytest.fail.Exception):\n pytest.deprecated_call(self.dep_explicit, 3)\n\n def test_deprecated_explicit_call(self):\n pytest.deprecated_call(self.dep_explicit, 0)\n pytest.deprecated_call(self.dep_explicit, 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_no_warning_TestDeprecatedCall.test_deprecated_call_no_warning.with_pytest_raises_pytest.if_mode_call_.else_.with_pytest_deprecated_ca.f_": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_no_warning_TestDeprecatedCall.test_deprecated_call_no_warning.with_pytest_raises_pytest.if_mode_call_.else_.with_pytest_deprecated_ca.f_", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 125, "end_line": 140, "span_ids": ["TestDeprecatedCall.test_deprecated_call_no_warning"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecatedCall(object):\n\n @pytest.mark.parametrize(\"mode\", [\"context_manager\", \"call\"])\n def test_deprecated_call_no_warning(self, mode):\n \"\"\"Ensure deprecated_call() raises the expected failure when its block/function does\n not raise a deprecation warning.\n \"\"\"\n\n def f():\n pass\n\n msg = \"No warnings of type (.*DeprecationWarning.*, .*PendingDeprecationWarning.*)\"\n with pytest.raises(pytest.fail.Exception, match=msg):\n if mode == \"call\":\n pytest.deprecated_call(f)\n else:\n with pytest.deprecated_call():\n f()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_modes_TestDeprecatedCall.test_deprecated_call_modes.if_mode_call_.else_.with_pytest_deprecated_ca.assert_f_10": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_modes_TestDeprecatedCall.test_deprecated_call_modes.if_mode_call_.else_.with_pytest_deprecated_ca.assert_f_10", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 142, "end_line": 164, "span_ids": ["TestDeprecatedCall.test_deprecated_call_modes"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecatedCall(object):\n\n @pytest.mark.parametrize(\n \"warning_type\", [PendingDeprecationWarning, DeprecationWarning]\n )\n @pytest.mark.parametrize(\"mode\", [\"context_manager\", \"call\"])\n @pytest.mark.parametrize(\"call_f_first\", [True, False])\n @pytest.mark.filterwarnings(\"ignore\")\n def test_deprecated_call_modes(self, warning_type, mode, call_f_first):\n \"\"\"Ensure deprecated_call() captures a deprecation warning as expected inside its\n block/function.\n \"\"\"\n\n def f():\n warnings.warn(warning_type(\"hi\"))\n return 10\n\n # ensure deprecated_call() can capture the warning even if it has already been triggered\n if call_f_first:\n assert f() == 10\n if mode == \"call\":\n assert pytest.deprecated_call(f) == 10\n else:\n with pytest.deprecated_call():\n assert f() == 10", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_exception_is_raised_TestDeprecatedCall.test_deprecated_call_supports_match.with_pytest_raises_pytest.with_pytest_deprecated_ca.warnings_warn_this_is_no": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestDeprecatedCall.test_deprecated_call_exception_is_raised_TestDeprecatedCall.test_deprecated_call_supports_match.with_pytest_raises_pytest.with_pytest_deprecated_ca.warnings_warn_this_is_no", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 166, "end_line": 209, "span_ids": ["TestDeprecatedCall.test_deprecated_call_supports_match", "TestDeprecatedCall.test_deprecated_call_exception_is_raised", "TestDeprecatedCall.test_deprecated_call_specificity"], "tokens": 291}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecatedCall(object):\n\n @pytest.mark.parametrize(\"mode\", [\"context_manager\", \"call\"])\n def test_deprecated_call_exception_is_raised(self, mode):\n \"\"\"If the block of the code being tested by deprecated_call() raises an exception,\n it must raise the exception undisturbed.\n \"\"\"\n\n def f():\n raise ValueError(\"some exception\")\n\n with pytest.raises(ValueError, match=\"some exception\"):\n if mode == \"call\":\n pytest.deprecated_call(f)\n else:\n with pytest.deprecated_call():\n f()\n\n def test_deprecated_call_specificity(self):\n other_warnings = [\n Warning,\n UserWarning,\n SyntaxWarning,\n RuntimeWarning,\n FutureWarning,\n ImportWarning,\n UnicodeWarning,\n ]\n for warning in other_warnings:\n\n def f():\n warnings.warn(warning(\"hi\"))\n\n with pytest.raises(pytest.fail.Exception):\n pytest.deprecated_call(f)\n with pytest.raises(pytest.fail.Exception):\n with pytest.deprecated_call():\n f()\n\n def test_deprecated_call_supports_match(self):\n with pytest.deprecated_call(match=r\"must be \\d+$\"):\n warnings.warn(\"value must be 42\", DeprecationWarning)\n\n with pytest.raises(pytest.fail.Exception):\n with pytest.deprecated_call(match=r\"must be \\d+$\"):\n warnings.warn(\"this is not here\", DeprecationWarning)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns_TestWarns.test_strings.for_w_in_warninfo_.assert_msg_startswith_wa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns_TestWarns.test_strings.for_w_in_warninfo_.assert_msg_startswith_wa", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 212, "end_line": 228, "span_ids": ["TestWarns.test_strings", "TestWarns"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n def test_strings(self):\n # different messages, b/c Python suppresses multiple identical warnings\n source1 = \"warnings.warn('w1', RuntimeWarning)\"\n source2 = \"warnings.warn('w2', RuntimeWarning)\"\n source3 = \"warnings.warn('w3', RuntimeWarning)\"\n with pytest.warns(PytestDeprecationWarning) as warninfo: # yo dawg\n pytest.warns(RuntimeWarning, source1)\n pytest.raises(\n pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2)\n )\n pytest.warns(RuntimeWarning, source3)\n assert len(warninfo) == 3\n for w in warninfo:\n assert w.filename == __file__\n msg, = w.message.args\n assert msg.startswith(\"warns(..., 'code(as_a_string)') is deprecated\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_function_TestWarns.test_warning_tuple.pytest_raises_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_function_TestWarns.test_warning_tuple.pytest_raises_", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 230, "end_line": 248, "span_ids": ["TestWarns.test_function", "TestWarns.test_warning_tuple"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n\n def test_function(self):\n pytest.warns(\n SyntaxWarning, lambda msg: warnings.warn(msg, SyntaxWarning), \"syntax\"\n )\n\n def test_warning_tuple(self):\n pytest.warns(\n (RuntimeWarning, SyntaxWarning), lambda: warnings.warn(\"w1\", RuntimeWarning)\n )\n pytest.warns(\n (RuntimeWarning, SyntaxWarning), lambda: warnings.warn(\"w2\", SyntaxWarning)\n )\n pytest.raises(\n pytest.fail.Exception,\n lambda: pytest.warns(\n (RuntimeWarning, SyntaxWarning),\n lambda: warnings.warn(\"w3\", UserWarning),\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_as_contextmanager_TestWarns.test_as_contextmanager.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_as_contextmanager_TestWarns.test_as_contextmanager.None_3", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 250, "end_line": 297, "span_ids": ["TestWarns.test_as_contextmanager"], "tokens": 403}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": 
"class TestWarns(object):\n\n def test_as_contextmanager(self):\n with pytest.warns(RuntimeWarning):\n warnings.warn(\"runtime\", RuntimeWarning)\n\n with pytest.warns(UserWarning):\n warnings.warn(\"user\", UserWarning)\n\n with pytest.raises(pytest.fail.Exception) as excinfo:\n with pytest.warns(RuntimeWarning):\n warnings.warn(\"user\", UserWarning)\n excinfo.match(\n r\"DID NOT WARN. No warnings of type \\(.+RuntimeWarning.+,\\) was emitted. \"\n r\"The list of emitted warnings is: \\[UserWarning\\('user',?\\)\\].\"\n )\n\n with pytest.raises(pytest.fail.Exception) as excinfo:\n with pytest.warns(UserWarning):\n warnings.warn(\"runtime\", RuntimeWarning)\n excinfo.match(\n r\"DID NOT WARN. No warnings of type \\(.+UserWarning.+,\\) was emitted. \"\n r\"The list of emitted warnings is: \\[RuntimeWarning\\('runtime',?\\)\\].\"\n )\n\n with pytest.raises(pytest.fail.Exception) as excinfo:\n with pytest.warns(UserWarning):\n pass\n excinfo.match(\n r\"DID NOT WARN. No warnings of type \\(.+UserWarning.+,\\) was emitted. \"\n r\"The list of emitted warnings is: \\[\\].\"\n )\n\n warning_classes = (UserWarning, FutureWarning)\n with pytest.raises(pytest.fail.Exception) as excinfo:\n with pytest.warns(warning_classes) as warninfo:\n warnings.warn(\"runtime\", RuntimeWarning)\n warnings.warn(\"import\", ImportWarning)\n\n message_template = (\n \"DID NOT WARN. No warnings of type {0} was emitted. \"\n \"The list of emitted warnings is: {1}.\"\n )\n excinfo.match(\n re.escape(\n message_template.format(\n warning_classes, [each.message for each in warninfo]\n )\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_TestWarns.test_record_only.assert_str_record_1_mess": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_TestWarns.test_record_only.assert_str_record_1_mess", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 299, "end_line": 313, "span_ids": ["TestWarns.test_record", "TestWarns.test_record_only"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n\n def test_record(self):\n with pytest.warns(UserWarning) as record:\n warnings.warn(\"user\", UserWarning)\n\n assert len(record) == 1\n assert str(record[0].message) == \"user\"\n\n def test_record_only(self):\n with pytest.warns(None) as record:\n warnings.warn(\"user\", UserWarning)\n warnings.warn(\"runtime\", RuntimeWarning)\n\n assert len(record) == 2\n assert str(record[0].message) == \"user\"\n assert str(record[1].message) == \"runtime\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_by_subclass_TestWarns.test_record_by_subclass.None_5": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_record_by_subclass_TestWarns.test_record_by_subclass.None_5", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 315, "end_line": 336, "span_ids": ["TestWarns.test_record_by_subclass.MyRuntimeWarning", "TestWarns.test_record_by_subclass.MyUserWarning", "TestWarns.test_record_by_subclass.MyRuntimeWarning:2", "TestWarns.test_record_by_subclass", "TestWarns.test_record_by_subclass.MyUserWarning:2"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n\n def test_record_by_subclass(self):\n with pytest.warns(Warning) as record:\n warnings.warn(\"user\", UserWarning)\n warnings.warn(\"runtime\", RuntimeWarning)\n\n assert len(record) == 2\n assert str(record[0].message) == \"user\"\n assert str(record[1].message) == \"runtime\"\n\n class MyUserWarning(UserWarning):\n pass\n\n class MyRuntimeWarning(RuntimeWarning):\n pass\n\n with pytest.warns((UserWarning, RuntimeWarning)) as record:\n warnings.warn(\"user\", MyUserWarning)\n warnings.warn(\"runtime\", MyRuntimeWarning)\n\n assert len(record) == 2\n assert str(record[0].message) == \"user\"\n assert str(record[1].message) == \"runtime\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_double_test_TestWarns.test_match_regex.None_2.with_pytest_warns_FutureW.warnings_warn_value_must": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_double_test_TestWarns.test_match_regex.None_2.with_pytest_warns_FutureW.warnings_warn_value_must", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 338, "end_line": 364, "span_ids": ["TestWarns.test_match_regex", "TestWarns.test_double_test"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n\n def test_double_test(self, testdir):\n \"\"\"If a test is run again, the warning should still be raised\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n import warnings\n\n @pytest.mark.parametrize('run', [1, 2])\n def test(run):\n with pytest.warns(RuntimeWarning):\n warnings.warn(\"runtime\", RuntimeWarning)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*2 passed in*\"])\n\n def test_match_regex(self):\n with pytest.warns(UserWarning, match=r\"must be \\d+$\"):\n warnings.warn(\"value must be 42\", UserWarning)\n\n with pytest.raises(pytest.fail.Exception):\n with pytest.warns(UserWarning, match=r\"must be \\d+$\"):\n warnings.warn(\"this is not here\", UserWarning)\n\n with 
pytest.raises(pytest.fail.Exception):\n with pytest.warns(FutureWarning, match=r\"must be \\d+$\"):\n warnings.warn(\"value must be 42\", UserWarning)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_one_from_multiple_warns_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_recwarn.py_TestWarns.test_one_from_multiple_warns_", "embedding": null, "metadata": {"file_path": "testing/test_recwarn.py", "file_name": "test_recwarn.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 387, "span_ids": ["TestWarns.test_one_from_multiple_warns", "TestWarns.test_none_of_multiple_warns", "TestWarns.test_can_capture_previously_warned"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWarns(object):\n\n def test_one_from_multiple_warns(self):\n with pytest.warns(UserWarning, match=r\"aaa\"):\n warnings.warn(\"cccccccccc\", UserWarning)\n warnings.warn(\"bbbbbbbbbb\", UserWarning)\n warnings.warn(\"aaaaaaaaaa\", UserWarning)\n\n def test_none_of_multiple_warns(self):\n with pytest.raises(pytest.fail.Exception):\n with pytest.warns(UserWarning, match=r\"aaa\"):\n warnings.warn(\"bbbbbbbbbb\", UserWarning)\n warnings.warn(\"cccccccccc\", UserWarning)\n\n @pytest.mark.filterwarnings(\"ignore\")\n def test_can_capture_previously_warned(self):\n def f():\n warnings.warn(UserWarning(\"ohai\"))\n return 10\n\n assert f() == 10\n assert pytest.warns(UserWarning, f) == 10\n assert pytest.warns(UserWarning, f) == 10", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_pytest_TestReportSerialization.test_xdist_longrepr_to_str_issue_241.assert_test_b_call__to_js": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_pytest_TestReportSerialization.test_xdist_longrepr_to_str_issue_241.assert_test_b_call__to_js", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 30, "span_ids": ["TestReportSerialization", "imports", "TestReportSerialization.test_xdist_longrepr_to_str_issue_241"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\nfrom _pytest.pathlib import Path\nfrom _pytest.reports import CollectReport\nfrom _pytest.reports import TestReport\n\n\nclass TestReportSerialization(object):\n def test_xdist_longrepr_to_str_issue_241(self, testdir):\n \"\"\"\n Regarding issue pytest-xdist#241\n\n This test came originally from test_remote.py in xdist (ca03269).\n \"\"\"\n testdir.makepyfile(\n 
\"\"\"\n def test_a(): assert False\n def test_b(): pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 6\n test_a_call = reports[1]\n assert test_a_call.when == \"call\"\n assert test_a_call.outcome == \"failed\"\n assert test_a_call._to_json()[\"longrepr\"][\"reprtraceback\"][\"style\"] == \"long\"\n test_b_call = reports[4]\n assert test_b_call.when == \"call\"\n assert test_b_call.outcome == \"passed\"\n assert test_b_call._to_json()[\"longrepr\"] is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130.assert_added_section_in_a": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130_TestReportSerialization.test_xdist_report_longrepr_reprcrash_130.assert_added_section_in_a", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 32, "end_line": 65, "span_ids": ["TestReportSerialization.test_xdist_report_longrepr_reprcrash_130"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_xdist_report_longrepr_reprcrash_130(self, testdir):\n \"\"\"Regarding issue pytest-xdist#130\n\n This test came originally from test_remote.py in xdist (ca03269).\n \"\"\"\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_fail():\n assert False, 'Expected Message'\n \"\"\"\n )\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3\n rep = reports[1]\n added_section = (\"Failure Metadata\", str(\"metadata metadata\"), \"*\")\n rep.longrepr.sections.append(added_section)\n d = rep._to_json()\n a = TestReport._from_json(d)\n # Check assembled == rep\n assert a.__dict__.keys() == rep.__dict__.keys()\n for key in rep.__dict__.keys():\n if key != \"longrepr\":\n assert getattr(a, key) == getattr(rep, key)\n assert rep.longrepr.reprcrash.lineno == a.longrepr.reprcrash.lineno\n assert rep.longrepr.reprcrash.message == a.longrepr.reprcrash.message\n assert rep.longrepr.reprcrash.path == a.longrepr.reprcrash.path\n assert rep.longrepr.reprtraceback.entrysep == a.longrepr.reprtraceback.entrysep\n assert (\n rep.longrepr.reprtraceback.extraline == a.longrepr.reprtraceback.extraline\n )\n assert rep.longrepr.reprtraceback.style == a.longrepr.reprtraceback.style\n assert rep.longrepr.sections == a.longrepr.sections\n # Missing section attribute PR171\n assert added_section in a.longrepr.sections", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_170_TestReportSerialization.test_reprentries_serialization_170.for_i_in_range_len_a_entr.assert_rep_entries_i_sty": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_170_TestReportSerialization.test_reprentries_serialization_170.for_i_in_range_len_a_entr.assert_rep_entries_i_sty", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 67, "end_line": 100, "span_ids": ["TestReportSerialization.test_reprentries_serialization_170"], "tokens": 330}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_reprentries_serialization_170(self, testdir):\n \"\"\"Regarding issue pytest-xdist#170\n\n This test came originally from test_remote.py in xdist (ca03269).\n \"\"\"\n from _pytest._code.code import ReprEntry\n\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_repr_entry():\n x = 0\n assert x\n \"\"\",\n \"--showlocals\",\n )\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3\n rep = reports[1]\n d = rep._to_json()\n a = TestReport._from_json(d)\n\n rep_entries = rep.longrepr.reprtraceback.reprentries\n a_entries = a.longrepr.reprtraceback.reprentries\n for i in range(len(a_entries)):\n assert isinstance(rep_entries[i], ReprEntry)\n assert rep_entries[i].lines == a_entries[i].lines\n assert rep_entries[i].reprfileloc.lineno == a_entries[i].reprfileloc.lineno\n assert (\n rep_entries[i].reprfileloc.message == a_entries[i].reprfileloc.message\n )\n assert rep_entries[i].reprfileloc.path == a_entries[i].reprfileloc.path\n assert rep_entries[i].reprfuncargs.args == a_entries[i].reprfuncargs.args\n assert rep_entries[i].reprlocals.lines == a_entries[i].reprlocals.lines\n assert rep_entries[i].style == a_entries[i].style", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_196_TestReportSerialization.test_reprentries_serialization_196.for_i_in_range_len_a_entr.assert_rep_entries_i_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_reprentries_serialization_196_TestReportSerialization.test_reprentries_serialization_196.for_i_in_range_len_a_entr.assert_rep_entries_i_lin", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 102, "end_line": 127, "span_ids": ["TestReportSerialization.test_reprentries_serialization_196"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_reprentries_serialization_196(self, testdir):\n \"\"\"Regarding issue pytest-xdist#196\n\n This test came originally from test_remote.py in xdist (ca03269).\n \"\"\"\n from _pytest._code.code import ReprEntryNative\n\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_repr_entry_native():\n x = 0\n assert x\n \"\"\",\n \"--tb=native\",\n )\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3\n rep = reports[1]\n d = rep._to_json()\n a = TestReport._from_json(d)\n\n rep_entries = rep.longrepr.reprtraceback.reprentries\n a_entries = a.longrepr.reprtraceback.reprentries\n for i in range(len(a_entries)):\n assert isinstance(rep_entries[i], ReprEntryNative)\n assert rep_entries[i].lines == a_entries[i].lines", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_itemreport_outcomes_TestReportSerialization.test_itemreport_outcomes.for_rep_in_reports_.if_rep_failed_.assert_newrep_longreprtex": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_itemreport_outcomes_TestReportSerialization.test_itemreport_outcomes.for_rep_in_reports_.if_rep_failed_.assert_newrep_longreprtex", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 162, "span_ids": ["TestReportSerialization.test_itemreport_outcomes"], "tokens": 312}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_itemreport_outcomes(self, testdir):\n \"\"\"\n This test came originally from test_remote.py in xdist (ca03269).\n \"\"\"\n reprec = testdir.inline_runsource(\n \"\"\"\n import py\n def test_pass(): pass\n def test_fail(): 0/0\n @py.test.mark.skipif(\"True\")\n def test_skip(): pass\n def test_skip_imperative():\n py.test.skip(\"hello\")\n @py.test.mark.xfail(\"True\")\n def test_xfail(): 0/0\n def test_xfail_imperative():\n py.test.xfail(\"hello\")\n \"\"\"\n )\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 17 # with setup/teardown \"passed\" reports\n for rep in reports:\n d = rep._to_json()\n newrep = TestReport._from_json(d)\n assert newrep.passed == rep.passed\n assert newrep.failed == rep.failed\n assert newrep.skipped == rep.skipped\n if newrep.skipped and not hasattr(newrep, \"wasxfail\"):\n assert len(newrep.longrepr) == 3\n assert newrep.outcome == rep.outcome\n assert newrep.when == rep.when\n assert newrep.keywords == rep.keywords\n if rep.failed:\n assert newrep.longreprtext == rep.longreprtext", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_passed_TestReportSerialization.test_collectreport_passed.for_rep_in_reports_.assert_newrep_skipped_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_passed_TestReportSerialization.test_collectreport_passed.for_rep_in_reports_.assert_newrep_skipped_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 164, "end_line": 173, "span_ids": ["TestReportSerialization.test_collectreport_passed"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_collectreport_passed(self, testdir):\n \"\"\"This test came originally from test_remote.py in xdist (ca03269).\"\"\"\n reprec = testdir.inline_runsource(\"def test_func(): pass\")\n reports = reprec.getreports(\"pytest_collectreport\")\n for rep in reports:\n d = rep._to_json()\n newrep = CollectReport._from_json(d)\n assert newrep.passed == rep.passed\n assert newrep.failed == rep.failed\n assert newrep.skipped == rep.skipped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_fail_TestReportSerialization.test_collectreport_fail.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_collectreport_fail_TestReportSerialization.test_collectreport_fail.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 175, "end_line": 187, "span_ids": ["TestReportSerialization.test_collectreport_fail"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_collectreport_fail(self, testdir):\n \"\"\"This test came originally from test_remote.py in xdist (ca03269).\"\"\"\n reprec = testdir.inline_runsource(\"qwe abc\")\n reports = reprec.getreports(\"pytest_collectreport\")\n assert reports\n for rep in reports:\n d = rep._to_json()\n newrep = CollectReport._from_json(d)\n assert newrep.passed == rep.passed\n assert newrep.failed == rep.failed\n assert newrep.skipped == rep.skipped\n if rep.failed:\n assert newrep.longrepr == str(rep.longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_extended_report_deserialization_TestReportSerialization.test_extended_report_deserialization.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_extended_report_deserialization_TestReportSerialization.test_extended_report_deserialization.for_rep_in_reports_.if_rep_failed_.assert_newrep_longrepr_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 189, "end_line": 203, "span_ids": ["TestReportSerialization.test_extended_report_deserialization"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_extended_report_deserialization(self, testdir):\n \"\"\"This test came originally from test_remote.py in xdist (ca03269).\"\"\"\n reprec = testdir.inline_runsource(\"qwe abc\")\n reports = reprec.getreports(\"pytest_collectreport\")\n assert reports\n for rep in reports:\n rep.extra = True\n d = rep._to_json()\n newrep = CollectReport._from_json(d)\n assert newrep.extra\n assert newrep.passed == rep.passed\n assert newrep.failed == rep.failed\n assert newrep.skipped == rep.skipped\n if rep.failed:\n assert newrep.longrepr == str(rep.longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_paths_support_TestReportSerialization.test_paths_support.assert_data_path2_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_paths_support_TestReportSerialization.test_paths_support.assert_data_path2_s", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 221, "span_ids": ["TestReportSerialization.test_paths_support"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_paths_support(self, testdir):\n \"\"\"Report attributes which are py.path or pathlib objects should become strings.\"\"\"\n testdir.makepyfile(\n \"\"\"\n def test_a():\n assert False\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3\n test_a_call = reports[1]\n test_a_call.path1 = testdir.tmpdir\n test_a_call.path2 = Path(testdir.tmpdir)\n data = test_a_call._to_json()\n assert data[\"path1\"] == str(testdir.tmpdir)\n assert data[\"path2\"] == str(testdir.tmpdir)", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_unserialization_failure_TestReportSerialization.test_unserialization_failure.with_pytest_raises_.TestReport__from_json_dat": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestReportSerialization.test_unserialization_failure_TestReportSerialization.test_unserialization_failure.with_pytest_raises_.TestReport__from_json_dat", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 223, "end_line": 243, "span_ids": ["TestReportSerialization.test_unserialization_failure"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportSerialization(object):\n\n def test_unserialization_failure(self, testdir):\n \"\"\"Check handling of failure during unserialization of report types.\"\"\"\n testdir.makepyfile(\n \"\"\"\n def test_a():\n assert False\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 3\n test_a_call = reports[1]\n data = test_a_call._to_json()\n entry = data[\"longrepr\"][\"reprtraceback\"][\"reprentries\"][0]\n assert entry[\"type\"] == \"ReprEntry\"\n\n entry[\"type\"] = \"Unknown\"\n with pytest.raises(\n RuntimeError, match=\"INTERNALERROR: Unknown entry type returned: Unknown\"\n ):\n TestReport._from_json(data)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks_TestHooks.test_test_report.for_rep_in_reports_.assert_new_rep_outcome_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks_TestHooks.test_test_report.for_rep_in_reports_.assert_new_rep_outcome_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 246, "end_line": 269, "span_ids": ["TestHooks", "TestHooks.test_test_report"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestHooks:\n \"\"\"Test that the hooks are working correctly for plugins\"\"\"\n\n def test_test_report(self, testdir, pytestconfig):\n testdir.makepyfile(\n \"\"\"\n def test_a(): assert False\n def test_b(): pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(\"pytest_runtest_logreport\")\n assert len(reports) == 6\n for rep in reports:\n data = pytestconfig.hook.pytest_report_to_serializable(\n config=pytestconfig, report=rep\n )\n assert data[\"_report_type\"] == \"TestReport\"\n new_rep = 
pytestconfig.hook.pytest_report_from_serializable(\n config=pytestconfig, data=data\n )\n assert new_rep.nodeid == rep.nodeid\n assert new_rep.when == rep.when\n assert new_rep.outcome == rep.outcome", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_collect_report_TestHooks.test_collect_report.for_rep_in_reports_.assert_new_rep_outcome_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_collect_report_TestHooks.test_collect_report.for_rep_in_reports_.assert_new_rep_outcome_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 271, "end_line": 291, "span_ids": ["TestHooks.test_collect_report"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestHooks:\n\n def test_collect_report(self, testdir, pytestconfig):\n testdir.makepyfile(\n \"\"\"\n def test_a(): assert False\n def test_b(): pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(\"pytest_collectreport\")\n assert len(reports) == 2\n for rep in reports:\n data = pytestconfig.hook.pytest_report_to_serializable(\n config=pytestconfig, report=rep\n )\n assert data[\"_report_type\"] == \"CollectReport\"\n new_rep = pytestconfig.hook.pytest_report_from_serializable(\n config=pytestconfig, data=data\n )\n assert new_rep.nodeid == rep.nodeid\n assert new_rep.when == \"collect\"\n assert new_rep.outcome == rep.outcome", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_invalid_report_types_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_reports.py_TestHooks.test_invalid_report_types_", "embedding": null, "metadata": {"file_path": "testing/test_reports.py", "file_name": "test_reports.py", "file_type": "text/x-python", "category": "test", "start_line": 293, "end_line": 314, "span_ids": ["TestHooks.test_invalid_report_types"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestHooks:\n\n @pytest.mark.parametrize(\n \"hook_name\", [\"pytest_runtest_logreport\", \"pytest_collectreport\"]\n )\n def test_invalid_report_types(self, testdir, pytestconfig, hook_name):\n testdir.makepyfile(\n \"\"\"\n def test_a(): pass\n \"\"\"\n )\n reprec = testdir.inline_run()\n reports = reprec.getreports(hook_name)\n assert reports\n rep = reports[0]\n data = pytestconfig.hook.pytest_report_to_serializable(\n config=pytestconfig, report=rep\n )\n data[\"_report_type\"] = \"Unknown\"\n with pytest.raises(AssertionError):\n _ = 
pytestconfig.hook.pytest_report_from_serializable(\n config=pytestconfig, data=data\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_from___future___import_ab_test_write_log_entry.None_14": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_from___future___import_ab_test_write_log_entry.None_14", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 54, "span_ids": ["imports", "test_write_log_entry", "impl"], "tokens": 459}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport py\n\nimport _pytest._code\nimport pytest\nfrom _pytest.resultlog import pytest_configure\nfrom _pytest.resultlog import pytest_unconfigure\nfrom _pytest.resultlog import ResultLog\n\npytestmark = pytest.mark.filterwarnings(\"ignore:--result-log is deprecated\")\n\n\ndef test_write_log_entry():\n reslog = ResultLog(None, None)\n reslog.logfile = py.io.TextIO()\n reslog.write_log_entry(\"name\", \".\", \"\")\n entry = reslog.logfile.getvalue()\n assert entry[-1] == \"\\n\"\n entry_lines = entry.splitlines()\n assert len(entry_lines) == 1\n assert entry_lines[0] == \". 
name\"\n\n reslog.logfile = py.io.TextIO()\n reslog.write_log_entry(\"name\", \"s\", \"Skipped\")\n entry = reslog.logfile.getvalue()\n assert entry[-1] == \"\\n\"\n entry_lines = entry.splitlines()\n assert len(entry_lines) == 2\n assert entry_lines[0] == \"s name\"\n assert entry_lines[1] == \" Skipped\"\n\n reslog.logfile = py.io.TextIO()\n reslog.write_log_entry(\"name\", \"s\", \"Skipped\\n\")\n entry = reslog.logfile.getvalue()\n assert entry[-1] == \"\\n\"\n entry_lines = entry.splitlines()\n assert len(entry_lines) == 2\n assert entry_lines[0] == \"s name\"\n assert entry_lines[1] == \" Skipped\"\n\n reslog.logfile = py.io.TextIO()\n longrepr = \" tb1\\n tb 2\\nE tb3\\nSome Error\"\n reslog.write_log_entry(\"name\", \"F\", longrepr)\n entry = reslog.logfile.getvalue()\n assert entry[-1] == \"\\n\"\n entry_lines = entry.splitlines()\n assert len(entry_lines) == 5\n assert entry_lines[0] == \"F name\"\n assert entry_lines[1:] == [\" \" + line for line in longrepr.splitlines()]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration_TestWithFunctionIntegration.test_collection_report.assert_XXX_in_join_l": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration_TestWithFunctionIntegration.test_collection_report.assert_XXX_in_join_l", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 57, "end_line": 80, "span_ids": ["TestWithFunctionIntegration", "TestWithFunctionIntegration.getresultlog", "TestWithFunctionIntegration.test_collection_report"], "tokens": 242}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWithFunctionIntegration(object):\n # XXX (hpk) i think that the resultlog plugin should\n # provide a Parser object so that one can remain\n # ignorant regarding formatting details.\n def getresultlog(self, testdir, arg):\n resultlog = testdir.tmpdir.join(\"resultlog\")\n testdir.plugins.append(\"resultlog\")\n args = [\"--resultlog=%s\" % resultlog] + [arg]\n testdir.runpytest(*args)\n return [x for x in resultlog.readlines(cr=0) if x]\n\n def test_collection_report(self, testdir):\n ok = testdir.makepyfile(test_collection_ok=\"\")\n fail = testdir.makepyfile(test_collection_fail=\"XXX\")\n lines = self.getresultlog(testdir, ok)\n assert not lines\n\n lines = self.getresultlog(testdir, fail)\n assert lines\n assert lines[0].startswith(\"F \")\n assert lines[0].endswith(\"test_collection_fail.py\"), lines[0]\n for x in lines[1:]:\n assert x.startswith(\" \")\n assert \"XXX\" in \"\".join(lines[1:])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_log_test_outcomes_TestWithFunctionIntegration.test_log_test_outcomes.assert_len_lines_15": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_log_test_outcomes_TestWithFunctionIntegration.test_log_test_outcomes.assert_len_lines_15", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 82, "end_line": 115, "span_ids": ["TestWithFunctionIntegration.test_log_test_outcomes"], "tokens": 315}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWithFunctionIntegration(object):\n # XXX (hpk) i think that the resultlog plugin should\n # provide a Parser object so that one can remain\n\n def test_log_test_outcomes(self, testdir):\n mod = testdir.makepyfile(\n test_mod=\"\"\"\n import pytest\n def test_pass(): pass\n def test_skip(): pytest.skip(\"hello\")\n def test_fail(): raise ValueError(\"FAIL\")\n\n @pytest.mark.xfail\n def test_xfail(): raise ValueError(\"XFAIL\")\n @pytest.mark.xfail\n def test_xpass(): pass\n\n \"\"\"\n )\n lines = self.getresultlog(testdir, mod)\n assert len(lines) >= 3\n assert lines[0].startswith(\". \")\n assert lines[0].endswith(\"test_pass\")\n assert lines[1].startswith(\"s \"), lines[1]\n assert lines[1].endswith(\"test_skip\")\n assert lines[2].find(\"hello\") != -1\n\n assert lines[3].startswith(\"F \")\n assert lines[3].endswith(\"test_fail\")\n tb = \"\".join(lines[4:8])\n assert tb.find('raise ValueError(\"FAIL\")') != -1\n\n assert lines[8].startswith(\"x \")\n tb = \"\".join(lines[8:14])\n assert tb.find('raise ValueError(\"XFAIL\")') != -1\n\n assert lines[14].startswith(\"X \")\n assert len(lines) == 15", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_internal_exception_TestWithFunctionIntegration.test_internal_exception.assert_ValueError_in_en": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_TestWithFunctionIntegration.test_internal_exception_TestWithFunctionIntegration.test_internal_exception.assert_ValueError_in_en", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 117, "end_line": 134, "span_ids": ["TestWithFunctionIntegration.test_internal_exception"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestWithFunctionIntegration(object):\n # XXX (hpk) i think that the resultlog plugin should\n # provide a Parser object so that one can remain\n\n @pytest.mark.parametrize(\"style\", (\"native\", \"long\", \"short\"))\n def test_internal_exception(self, style):\n # they are produced for example by a teardown failing\n # at the end of the run or a failing hook invocation\n try:\n raise ValueError\n except 
ValueError:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n reslog = ResultLog(None, py.io.TextIO())\n reslog.pytest_internalerror(excinfo.getrepr(style=style))\n entry = reslog.logfile.getvalue()\n entry_lines = entry.splitlines()\n\n assert entry_lines[0].startswith(\"! \")\n if style != \"native\":\n assert os.path.basename(__file__)[:-9] in entry_lines[0] # .pyc/class\n assert entry_lines[-1][0] == \" \"\n assert \"ValueError\" in entry", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_generic_test_generic.LineMatcher_lines_fnmatc": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_generic_test_generic.LineMatcher_lines_fnmatc", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 166, "span_ids": ["test_generic"], "tokens": 191}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_generic(testdir, LineMatcher):\n testdir.plugins.append(\"resultlog\")\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_pass():\n pass\n def test_fail():\n assert 0\n def test_skip():\n pytest.skip(\"\")\n @pytest.mark.xfail\n def test_xfail():\n assert 0\n @pytest.mark.xfail(run=False)\n def test_xfail_norun():\n assert 0\n \"\"\"\n )\n testdir.runpytest(\"--resultlog=result.log\")\n lines = testdir.tmpdir.join(\"result.log\").readlines(cr=0)\n LineMatcher(lines).fnmatch_lines(\n [\n \". 
*:test_pass\",\n \"F *:test_fail\",\n \"s *:test_skip\",\n \"x *:test_xfail\",\n \"x *:test_xfail_norun\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_makedir_for_resultlog_test_no_resultlog_on_slaves.None_4": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_makedir_for_resultlog_test_no_resultlog_on_slaves.None_4", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 169, "end_line": 197, "span_ids": ["test_makedir_for_resultlog", "test_no_resultlog_on_slaves"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_makedir_for_resultlog(testdir, LineMatcher):\n \"\"\"--resultlog should automatically create directories for the log file\"\"\"\n testdir.plugins.append(\"resultlog\")\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_pass():\n pass\n \"\"\"\n )\n testdir.runpytest(\"--resultlog=path/to/result.log\")\n lines = testdir.tmpdir.join(\"path/to/result.log\").readlines(cr=0)\n LineMatcher(lines).fnmatch_lines([\". *:test_pass\"])\n\n\ndef test_no_resultlog_on_slaves(testdir):\n config = testdir.parseconfig(\"-p\", \"resultlog\", \"--resultlog=resultlog\")\n\n assert not hasattr(config, \"_resultlog\")\n pytest_configure(config)\n assert hasattr(config, \"_resultlog\")\n pytest_unconfigure(config)\n assert not hasattr(config, \"_resultlog\")\n\n config.slaveinput = {}\n pytest_configure(config)\n assert not hasattr(config, \"_resultlog\")\n pytest_unconfigure(config)\n assert not hasattr(config, \"_resultlog\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_failure_issue380_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_resultlog.py_test_failure_issue380_", "embedding": null, "metadata": {"file_path": "testing/test_resultlog.py", "file_name": "test_resultlog.py", "file_type": "text/x-python", "category": "test", "start_line": 200, "end_line": 221, "span_ids": ["test_failure_issue380"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_failure_issue380(testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n class MyCollector(pytest.File):\n def collect(self):\n raise ValueError()\n def repr_failure(self, excinfo):\n return \"somestring\"\n def pytest_collect_file(path, parent):\n return MyCollector(parent=parent, fspath=path)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--resultlog=log\")\n assert 
result.ret == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__coding_utf_8__TestSetupState.test_setup_fails_and_failure_is_cached.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__coding_utf_8__TestSetupState.test_setup_fails_and_failure_is_cached.None_1", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["TestSetupState.test_setup", "TestSetupState.test_setup_fails_and_failure_is_cached", "TestSetupState", "docstring", "TestSetupState.test_teardown_exact_stack_empty", "imports"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport inspect\nimport os\nimport sys\nimport types\n\nimport py\n\nimport _pytest._code\nimport pytest\nfrom _pytest import main\nfrom _pytest import outcomes\nfrom _pytest import reports\nfrom _pytest import runner\n\n\nclass TestSetupState(object):\n def test_setup(self, testdir):\n ss = runner.SetupState()\n item = testdir.getitem(\"def test_func(): pass\")\n values = [1]\n ss.prepare(item)\n ss.addfinalizer(values.pop, colitem=item)\n assert values\n ss._pop_and_teardown()\n assert not values\n\n def test_teardown_exact_stack_empty(self, testdir):\n item = testdir.getitem(\"def test_func(): pass\")\n ss = runner.SetupState()\n ss.teardown_exact(item, None)\n ss.teardown_exact(item, None)\n ss.teardown_exact(item, None)\n\n def test_setup_fails_and_failure_is_cached(self, testdir):\n item = testdir.getitem(\n \"\"\"\n def setup_module(mod):\n raise ValueError(42)\n def test_func(): pass\n \"\"\"\n )\n ss = runner.SetupState()\n pytest.raises(ValueError, lambda: ss.prepare(item))\n pytest.raises(ValueError, lambda: ss.prepare(item))", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_one_fails_TestSetupState.test_teardown_multiple_one_fails.assert_r_fin3_fin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_one_fails_TestSetupState.test_teardown_multiple_one_fails.assert_r_fin3_fin", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 51, "end_line": 71, "span_ids": ["TestSetupState.test_teardown_multiple_one_fails"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], 
"relationships": {}, "text": "class TestSetupState(object):\n\n def test_teardown_multiple_one_fails(self, testdir):\n r = []\n\n def fin1():\n r.append(\"fin1\")\n\n def fin2():\n raise Exception(\"oops\")\n\n def fin3():\n r.append(\"fin3\")\n\n item = testdir.getitem(\"def test_func(): pass\")\n ss = runner.SetupState()\n ss.addfinalizer(fin1, item)\n ss.addfinalizer(fin2, item)\n ss.addfinalizer(fin3, item)\n with pytest.raises(Exception) as err:\n ss._callfinalizers(item)\n assert err.value.args == (\"oops\",)\n assert r == [\"fin3\", \"fin1\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_fail_TestSetupState.test_teardown_multiple_fail.assert_err_value_args_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_fail_TestSetupState.test_teardown_multiple_fail.assert_err_value_args_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 73, "end_line": 88, "span_ids": ["TestSetupState.test_teardown_multiple_fail"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSetupState(object):\n\n def test_teardown_multiple_fail(self, testdir):\n # Ensure the first exception is the one which is re-raised.\n # Ideally both would be reported however.\n def fin1():\n raise Exception(\"oops1\")\n\n def fin2():\n raise Exception(\"oops2\")\n\n item = testdir.getitem(\"def test_func(): pass\")\n ss = runner.SetupState()\n ss.addfinalizer(fin1, item)\n ss.addfinalizer(fin2, item)\n with pytest.raises(Exception) as err:\n ss._callfinalizers(item)\n assert err.value.args == (\"oops2\",)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_scopes_one_fails_TestSetupState.test_teardown_multiple_scopes_one_fails.assert_module_teardown": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSetupState.test_teardown_multiple_scopes_one_fails_TestSetupState.test_teardown_multiple_scopes_one_fails.assert_module_teardown", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 90, "end_line": 106, "span_ids": ["TestSetupState.test_teardown_multiple_scopes_one_fails"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSetupState(object):\n\n def test_teardown_multiple_scopes_one_fails(self, testdir):\n module_teardown = 
[]\n\n def fin_func():\n raise Exception(\"oops1\")\n\n def fin_module():\n module_teardown.append(\"fin_module\")\n\n item = testdir.getitem(\"def test_func(): pass\")\n ss = runner.SetupState()\n ss.addfinalizer(fin_module, item.listchain()[-2])\n ss.addfinalizer(fin_func, item)\n ss.prepare(item)\n with pytest.raises(Exception, match=\"oops1\"):\n ss.teardown_exact(item, None)\n assert module_teardown", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests_BaseFunctionalTests.test_failfunction._assert_isinstance_rep_l": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests_BaseFunctionalTests.test_failfunction._assert_isinstance_rep_l", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 109, "end_line": 136, "span_ids": ["BaseFunctionalTests", "BaseFunctionalTests.test_passfunction", "BaseFunctionalTests.test_failfunction"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n def test_passfunction(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n rep = reports[1]\n assert rep.passed\n assert not rep.failed\n assert rep.outcome == \"passed\"\n assert not rep.longrepr\n\n def test_failfunction(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n assert 0\n \"\"\"\n )\n rep = reports[1]\n assert not rep.passed\n assert not rep.skipped\n assert rep.failed\n assert rep.when == \"call\"\n assert rep.outcome == \"failed\"\n # assert isinstance(rep.longrepr, ReprExceptionInfo)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skipfunction_BaseFunctionalTests.test_skipfunction._assert_not_rep_skipped_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skipfunction_BaseFunctionalTests.test_skipfunction._assert_not_rep_skipped_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 138, "end_line": 156, "span_ids": ["BaseFunctionalTests.test_skipfunction"], "tokens": 150}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_skipfunction(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n import pytest\n def test_func():\n pytest.skip(\"hello\")\n \"\"\"\n )\n rep = reports[1]\n assert not rep.failed\n assert not 
rep.passed\n assert rep.skipped\n assert rep.outcome == \"skipped\"\n # assert rep.skipped.when == \"call\"\n # assert rep.skipped.when == \"call\"\n # assert rep.skipped == \"%sreason == \"hello\"\n # assert rep.skipped.location.lineno == 3\n # assert rep.skipped.location.path\n # assert not rep.skipped.failurerepr", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skip_in_setup_function_BaseFunctionalTests.test_skip_in_setup_function._teardown": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_skip_in_setup_function_BaseFunctionalTests.test_skip_in_setup_function._teardown", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 158, "end_line": 177, "span_ids": ["BaseFunctionalTests.test_skip_in_setup_function"], "tokens": 144}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_skip_in_setup_function(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n import pytest\n def setup_function(func):\n pytest.skip(\"hello\")\n def test_func():\n pass\n \"\"\"\n )\n print(reports)\n rep = reports[0]\n assert not rep.failed\n assert not rep.passed\n assert rep.skipped\n # assert rep.skipped.reason == \"hello\"\n # assert rep.skipped.location.lineno == 3\n # assert rep.skipped.location.lineno == 3\n assert len(reports) == 2\n assert reports[1].passed # teardown", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_BaseFunctionalTests.test_failure_in_teardown_function.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_BaseFunctionalTests.test_failure_in_teardown_function.None_1", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 179, "end_line": 214, "span_ids": ["BaseFunctionalTests.test_failure_in_teardown_function", "BaseFunctionalTests.test_failure_in_setup_function"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_failure_in_setup_function(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n import pytest\n def setup_function(func):\n raise ValueError(42)\n def test_func():\n pass\n \"\"\"\n )\n rep = reports[0]\n assert not rep.skipped\n assert not rep.passed\n assert rep.failed\n assert 
rep.when == \"setup\"\n assert len(reports) == 2\n\n def test_failure_in_teardown_function(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n import pytest\n def teardown_function(func):\n raise ValueError(42)\n def test_func():\n pass\n \"\"\"\n )\n print(reports)\n assert len(reports) == 3\n rep = reports[2]\n assert not rep.skipped\n assert not rep.passed\n assert rep.failed\n assert rep.when == \"teardown\"\n # assert rep.longrepr.reprcrash.lineno == 3\n # assert rep.longrepr.reprtraceback.reprentries", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_custom_failure_repr_BaseFunctionalTests.test_custom_failure_repr._assert_rep_failed_failu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_custom_failure_repr_BaseFunctionalTests.test_custom_failure_repr._assert_rep_failed_failu", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 216, "end_line": 239, "span_ids": ["BaseFunctionalTests.test_custom_failure_repr"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_custom_failure_repr(self, testdir):\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n class Function(pytest.Function):\n def repr_failure(self, excinfo):\n return \"hello\"\n \"\"\"\n )\n reports = testdir.runitem(\n \"\"\"\n import pytest\n def test_func():\n assert 0\n \"\"\"\n )\n rep = reports[1]\n assert not rep.skipped\n assert not rep.passed\n assert rep.failed\n # assert rep.outcome.when == \"call\"\n # assert rep.failed.where.lineno == 3\n # assert rep.failed.where.path.basename == \"test_func.py\"\n # assert rep.failed.failurerepr == \"hello\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_teardown_final_returncode_BaseFunctionalTests.test_logstart_logfinish_hooks.for_rep_in_reps_.assert_rep_location_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_teardown_final_returncode_BaseFunctionalTests.test_logstart_logfinish_hooks.for_rep_in_reps_.assert_rep_location_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 241, "end_line": 267, "span_ids": ["BaseFunctionalTests.test_teardown_final_returncode", "BaseFunctionalTests.test_logstart_logfinish_hooks"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": 
{}, "text": "class BaseFunctionalTests(object):\n\n def test_teardown_final_returncode(self, testdir):\n rec = testdir.inline_runsource(\n \"\"\"\n def test_func():\n pass\n def teardown_function(func):\n raise ValueError(42)\n \"\"\"\n )\n assert rec.ret == 1\n\n def test_logstart_logfinish_hooks(self, testdir):\n rec = testdir.inline_runsource(\n \"\"\"\n import pytest\n def test_func():\n pass\n \"\"\"\n )\n reps = rec.getcalls(\"pytest_runtest_logstart pytest_runtest_logfinish\")\n assert [x._name for x in reps] == [\n \"pytest_runtest_logstart\",\n \"pytest_runtest_logfinish\",\n ]\n for rep in reps:\n assert rep.nodeid == \"test_logstart_logfinish_hooks.py::test_func\"\n assert rep.location == (\"test_logstart_logfinish_hooks.py\", 1, \"test_func\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue90_BaseFunctionalTests.test_exact_teardown_issue90.assert_reps_5_failed": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue90_BaseFunctionalTests.test_exact_teardown_issue90.assert_reps_5_failed", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 269, "end_line": 307, "span_ids": ["BaseFunctionalTests.test_exact_teardown_issue90"], "tokens": 299}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_exact_teardown_issue90(self, testdir):\n rec = testdir.inline_runsource(\n \"\"\"\n import pytest\n\n class TestClass(object):\n def test_method(self):\n pass\n def teardown_class(cls):\n raise Exception()\n\n def test_func():\n import sys\n # on python2 exc_info is keept till a function exits\n # so we would end up calling test functions while\n # sys.exc_info would return the indexerror\n # from guessing the lastitem\n excinfo = sys.exc_info()\n import traceback\n assert excinfo[0] is None, \\\n traceback.format_exception(*excinfo)\n def teardown_function(func):\n raise ValueError(42)\n \"\"\"\n )\n reps = rec.getreports(\"pytest_runtest_logreport\")\n print(reps)\n for i in range(2):\n assert reps[i].nodeid.endswith(\"test_method\")\n assert reps[i].passed\n assert reps[2].when == \"teardown\"\n assert reps[2].failed\n assert len(reps) == 6\n for i in range(3, 5):\n assert reps[i].nodeid.endswith(\"test_func\")\n assert reps[i].passed\n assert reps[5].when == \"teardown\"\n assert reps[5].nodeid.endswith(\"test_func\")\n assert reps[5].failed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue1206_BaseFunctionalTests.test_exact_teardown_issue1206.assert_reps_2_longrepr_r": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_exact_teardown_issue1206_BaseFunctionalTests.test_exact_teardown_issue1206.assert_reps_2_longrepr_r", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 309, "end_line": 343, "span_ids": ["BaseFunctionalTests.test_exact_teardown_issue1206"], "tokens": 282}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_exact_teardown_issue1206(self, testdir):\n \"\"\"issue shadowing error with wrong number of arguments on teardown_method.\"\"\"\n rec = testdir.inline_runsource(\n \"\"\"\n import pytest\n\n class TestClass(object):\n def teardown_method(self, x, y, z):\n pass\n\n def test_method(self):\n assert True\n \"\"\"\n )\n reps = rec.getreports(\"pytest_runtest_logreport\")\n print(reps)\n assert len(reps) == 3\n #\n assert reps[0].nodeid.endswith(\"test_method\")\n assert reps[0].passed\n assert reps[0].when == \"setup\"\n #\n assert reps[1].nodeid.endswith(\"test_method\")\n assert reps[1].passed\n assert reps[1].when == \"call\"\n #\n assert reps[2].nodeid.endswith(\"test_method\")\n assert reps[2].failed\n assert reps[2].when == \"teardown\"\n assert reps[2].longrepr.reprcrash.message in (\n # python3 error\n \"TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'\",\n # python2 error\n \"TypeError: teardown_method() takes exactly 4 arguments (2 given)\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr._assert_instanace_rep_fa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr_BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr._assert_instanace_rep_fa", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 345, "end_line": 371, "span_ids": ["BaseFunctionalTests.test_failure_in_setup_function_ignores_custom_repr"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_failure_in_setup_function_ignores_custom_repr(self, testdir):\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n class Function(pytest.Function):\n def repr_failure(self, excinfo):\n assert 0\n \"\"\"\n )\n reports = testdir.runitem(\n \"\"\"\n def setup_function(func):\n raise ValueError(42)\n def test_func():\n pass\n \"\"\"\n )\n assert len(reports) == 2\n rep 
= reports[0]\n print(rep)\n assert not rep.skipped\n assert not rep.passed\n assert rep.failed\n # assert rep.outcome.when == \"setup\"\n # assert rep.outcome.where.lineno == 3\n # assert rep.outcome.where.path.basename == \"test_func.py\"\n # assert instanace(rep.failed.failurerepr, PythonFailureRepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_systemexit_does_not_bail_out_BaseFunctionalTests.test_exit_propagates.try_.else_.pytest_fail_did_not_rais": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_BaseFunctionalTests.test_systemexit_does_not_bail_out_BaseFunctionalTests.test_exit_propagates.try_.else_.pytest_fail_did_not_rais", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 373, "end_line": 399, "span_ids": ["BaseFunctionalTests.test_exit_propagates", "BaseFunctionalTests.test_systemexit_does_not_bail_out"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class BaseFunctionalTests(object):\n\n def test_systemexit_does_not_bail_out(self, testdir):\n try:\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n raise SystemExit(42)\n \"\"\"\n )\n except SystemExit:\n pytest.fail(\"runner did not catch SystemExit\")\n rep = reports[1]\n assert rep.failed\n assert rep.when == \"call\"\n\n def test_exit_propagates(self, testdir):\n try:\n testdir.runitem(\n \"\"\"\n import pytest\n def test_func():\n raise pytest.exit.Exception()\n \"\"\"\n )\n except pytest.exit.Exception:\n pass\n else:\n pytest.fail(\"did not raise\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestExecutionNonForked_TestExecutionForked.test_suicide.assert_rep_when_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestExecutionNonForked_TestExecutionForked.test_suicide.assert_rep_when_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 441, "span_ids": ["TestExecutionNonForked.test_keyboardinterrupt_propagates", "TestExecutionNonForked", "TestExecutionForked", "TestExecutionForked.test_suicide", "TestExecutionNonForked.getrunner", "TestExecutionForked.getrunner"], "tokens": 228}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestExecutionNonForked(BaseFunctionalTests):\n def getrunner(self):\n def f(item):\n return runner.runtestprotocol(item, log=False)\n\n return f\n\n def 
test_keyboardinterrupt_propagates(self, testdir):\n try:\n testdir.runitem(\n \"\"\"\n def test_func():\n raise KeyboardInterrupt(\"fake\")\n \"\"\"\n )\n except KeyboardInterrupt:\n pass\n else:\n pytest.fail(\"did not raise\")\n\n\nclass TestExecutionForked(BaseFunctionalTests):\n pytestmark = pytest.mark.skipif(\"not hasattr(os, 'fork')\")\n\n def getrunner(self):\n # XXX re-arrange this test to live in pytest-xdist\n boxed = pytest.importorskip(\"xdist.boxed\")\n return boxed.forked_run_report\n\n def test_suicide(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n import os\n os.kill(os.getpid(), 15)\n \"\"\"\n )\n rep = reports[0]\n assert rep.failed\n assert rep.when == \"???\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSessionReports_TestSessionReports.test_collect_result.assert_res_1_name_Te": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestSessionReports_TestSessionReports.test_collect_result.assert_res_1_name_Te", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 444, "end_line": 465, "span_ids": ["TestSessionReports.test_collect_result", "TestSessionReports"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSessionReports(object):\n def test_collect_result(self, testdir):\n col = testdir.getmodulecol(\n \"\"\"\n def test_func1():\n pass\n class TestClass(object):\n pass\n \"\"\"\n )\n rep = runner.collect_one_node(col)\n assert not rep.failed\n assert not rep.skipped\n assert rep.passed\n locinfo = rep.location\n assert locinfo[0] == col.fspath.basename\n assert not locinfo[1]\n assert locinfo[2] == col.fspath.basename\n res = rep.result\n assert len(res) == 2\n assert res[0].name == \"test_func1\"\n assert res[1].name == \"TestClass\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_reporttypes_test_report_extra_parameters.assert_report_newthing_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_reporttypes_test_report_extra_parameters.assert_report_newthing_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 468, "end_line": 481, "span_ids": ["test_report_extra_parameters", "impl"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "reporttypes = [reports.BaseReport, reports.TestReport, reports.CollectReport]\n\n\n@pytest.mark.parametrize(\n \"reporttype\", 
reporttypes, ids=[x.__name__ for x in reporttypes]\n)\ndef test_report_extra_parameters(reporttype):\n if hasattr(inspect, \"signature\"):\n args = list(inspect.signature(reporttype.__init__).parameters.keys())[1:]\n else:\n args = inspect.getargspec(reporttype.__init__)[0][1:]\n basekw = dict.fromkeys(args, [])\n report = reporttype(newthing=1, **basekw)\n assert report.newthing == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_callinfo_test_callinfo.assert_exc_in_repr_ci_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_callinfo_test_callinfo.assert_exc_in_repr_ci_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 484, "end_line": 496, "span_ids": ["test_callinfo"], "tokens": 135}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_callinfo():\n ci = runner.CallInfo.from_call(lambda: 0, \"123\")\n assert ci.when == \"123\"\n assert ci.result == 0\n assert \"result\" in repr(ci)\n assert repr(ci) == \"\"\n\n ci = runner.CallInfo.from_call(lambda: 0 / 0, \"123\")\n assert ci.when == \"123\"\n assert not hasattr(ci, \"result\")\n assert repr(ci) == \"\"\n assert ci.excinfo\n assert \"exc\" in repr(ci)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__design_question_do_we__test_runtest_in_module_ordering.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py__design_question_do_we__test_runtest_in_module_ordering.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 499, "end_line": 532, "span_ids": ["test_callinfo", "test_runtest_in_module_ordering"], "tokens": 263}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# design question: do we want general hooks in python files?\n# then something like the following functional tests makes sense\n\n\n@pytest.mark.xfail\ndef test_runtest_in_module_ordering(testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n import pytest\n def pytest_runtest_setup(item): # runs after class-level!\n item.function.mylist.append(\"module\")\n class TestClass(object):\n def pytest_runtest_setup(self, item):\n assert not hasattr(item.function, 'mylist')\n item.function.mylist = ['class']\n @pytest.fixture\n def mylist(self, request):\n return request.function.mylist\n def pytest_runtest_call(self, item, __multicall__):\n try:\n __multicall__.execute()\n except 
ValueError:\n pass\n def test_hello1(self, mylist):\n assert mylist == ['class', 'module'], mylist\n raise ValueError()\n def test_hello2(self, mylist):\n assert mylist == ['class', 'module'], mylist\n def pytest_runtest_teardown(item):\n del item.function.mylist\n \"\"\"\n )\n result = testdir.runpytest(p1)\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_outcomeexception_exceptionattributes_test_pytest_exit_msg.result_stderr_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_outcomeexception_exceptionattributes_test_pytest_exit_msg.result_stderr_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 535, "end_line": 571, "span_ids": ["test_outcomeexception_exceptionattributes", "test_pytest_exit_msg", "test_pytest_exit", "test_pytest_fail", "test_outcomeexception_passes_except_Exception"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_outcomeexception_exceptionattributes():\n outcome = outcomes.OutcomeException(\"test\")\n assert outcome.args[0] == outcome.msg\n\n\ndef test_outcomeexception_passes_except_Exception():\n with pytest.raises(outcomes.OutcomeException):\n try:\n raise outcomes.OutcomeException(\"test\")\n except Exception:\n pass\n\n\ndef test_pytest_exit():\n with pytest.raises(pytest.exit.Exception) as excinfo:\n pytest.exit(\"hello\")\n assert excinfo.errisinstance(pytest.exit.Exception)\n\n\ndef test_pytest_fail():\n with pytest.raises(pytest.fail.Exception) as excinfo:\n pytest.fail(\"hello\")\n s = excinfo.exconly(tryshort=True)\n assert s.startswith(\"Failed\")\n\n\ndef test_pytest_exit_msg(testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n def pytest_configure(config):\n pytest.exit('oh noes')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stderr.fnmatch_lines([\"Exit: oh noes\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_exit_returncode_test_pytest_exit_returncode.assert_result_ret_98": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_exit_returncode_test_pytest_exit_returncode.assert_result_ret_98", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 574, "end_line": 606, "span_ids": ["test_pytest_exit_returncode"], "tokens": 247}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, 
"text": "def test_pytest_exit_returncode(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_foo():\n pytest.exit(\"some exit msg\", 99)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*! *Exit: some exit msg !*\"])\n # Assert no output on stderr, except for unreliable ResourceWarnings.\n # (https://github.com/pytest-dev/pytest/issues/5088)\n assert [\n x\n for x in result.stderr.lines\n if not x.startswith(\"Exception ignored in:\")\n and not x.startswith(\"ResourceWarning\")\n ] == [\"\"]\n assert result.ret == 99\n\n # It prints to stderr also in case of exit during pytest_sessionstart.\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n def pytest_sessionstart():\n pytest.exit(\"during_sessionstart\", 98)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*! *Exit: during_sessionstart !*\"])\n assert result.stderr.lines == [\"Exit: during_sessionstart\", \"\"]\n assert result.ret == 98", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_runtest_test_pytest_fail_notrace_collection.assert_def_some_internal": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_runtest_test_pytest_fail_notrace_collection.assert_def_some_internal", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 609, "end_line": 637, "span_ids": ["test_pytest_fail_notrace_runtest", "test_pytest_fail_notrace_collection"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_fail_notrace_runtest(testdir):\n \"\"\"Test pytest.fail(..., pytrace=False) does not show tracebacks during test run.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_hello():\n pytest.fail(\"hello\", pytrace=False)\n def teardown_function(function):\n pytest.fail(\"world\", pytrace=False)\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"world\", \"hello\"])\n assert \"def teardown_function\" not in result.stdout.str()\n\n\ndef test_pytest_fail_notrace_collection(testdir):\n \"\"\"Test pytest.fail(..., pytrace=False) does not show tracebacks during collection.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n def some_internal_function():\n pytest.fail(\"hello\", pytrace=False)\n some_internal_function()\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"hello\"])\n assert \"def some_internal_function()\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_non_ascii_test_pytest_fail_notrace_non_ascii.assert_def_test_hello_n": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_fail_notrace_non_ascii_test_pytest_fail_notrace_non_ascii.assert_def_test_hello_n", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 640, "end_line": 661, "span_ids": ["test_pytest_fail_notrace_non_ascii"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"str_prefix\", [\"u\", \"\"])\ndef test_pytest_fail_notrace_non_ascii(testdir, str_prefix):\n \"\"\"Fix pytest.fail with pytrace=False with non-ascii characters (#1178).\n\n This tests with native and unicode strings containing non-ascii chars.\n \"\"\"\n testdir.makepyfile(\n u\"\"\"\n # coding: utf-8\n import pytest\n\n def test_hello():\n pytest.fail(%s'oh oh: \u263a', pytrace=False)\n \"\"\"\n % str_prefix\n )\n result = testdir.runpytest()\n if sys.version_info[0] >= 3:\n result.stdout.fnmatch_lines([\"*test_hello*\", \"oh oh: \u263a\"])\n else:\n result.stdout.fnmatch_lines([\"*test_hello*\", \"oh oh: *\"])\n assert \"def test_hello\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_no_tests_collected_exit_status_test_exception_printing_skip.try_.except_pytest_skip_Except.assert_s_startswith_Skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_pytest_no_tests_collected_exit_status_test_exception_printing_skip.try_.except_pytest_skip_Except.assert_s_startswith_Skip", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 664, "end_line": 692, "span_ids": ["test_exception_printing_skip", "test_pytest_no_tests_collected_exit_status"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pytest_no_tests_collected_exit_status(testdir):\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*collected 0 items*\"])\n assert result.ret == main.EXIT_NOTESTSCOLLECTED\n\n testdir.makepyfile(\n test_foo=\"\"\"\n def test_foo():\n assert 1\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*collected 1 item*\"])\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n assert result.ret == main.EXIT_OK\n\n result = testdir.runpytest(\"-k nonmatch\")\n result.stdout.fnmatch_lines([\"*collected 1 item*\"])\n result.stdout.fnmatch_lines([\"*1 deselected*\"])\n assert result.ret == main.EXIT_NOTESTSCOLLECTED\n\n\ndef test_exception_printing_skip():\n try:\n pytest.skip(\"hello\")\n except pytest.skip.Exception:\n excinfo = _pytest._code.ExceptionInfo.from_current()\n s = excinfo.exconly(tryshort=True)\n assert 
s.startswith(\"Skipped\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_test_importorskip.try_.except_pytest_skip_Except.pytest_fail_spurious_ski": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_test_importorskip.try_.except_pytest_skip_Except.pytest_fail_spurious_ski", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 695, "end_line": 722, "span_ids": ["test_importorskip"], "tokens": 279}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_importorskip(monkeypatch):\n importorskip = pytest.importorskip\n\n def f():\n importorskip(\"asdlkj\")\n\n try:\n sysmod = importorskip(\"sys\")\n assert sysmod is sys\n # path = pytest.importorskip(\"os.path\")\n # assert path == os.path\n excinfo = pytest.raises(pytest.skip.Exception, f)\n path = py.path.local(excinfo.getrepr().reprcrash.path)\n # check that importorskip reports the actual call\n # in this test the test_runner.py file\n assert path.purebasename == \"test_runner\"\n pytest.raises(SyntaxError, pytest.importorskip, \"x y z\")\n pytest.raises(SyntaxError, pytest.importorskip, \"x=y\")\n mod = types.ModuleType(\"hello123\")\n mod.__version__ = \"1.3\"\n monkeypatch.setitem(sys.modules, \"hello123\", mod)\n with pytest.raises(pytest.skip.Exception):\n pytest.importorskip(\"hello123\", minversion=\"1.3.1\")\n mod2 = pytest.importorskip(\"hello123\", minversion=\"1.3\")\n assert mod2 == mod\n except pytest.skip.Exception:\n print(_pytest._code.ExceptionInfo.from_current())\n pytest.fail(\"spurious skip\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_imports_last_module_part_test_importorskip_dev_module.try_.except_pytest_skip_Except.pytest_fail_spurious_ski": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_imports_last_module_part_test_importorskip_dev_module.try_.except_pytest_skip_Except.pytest_fail_spurious_ski", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 725, "end_line": 741, "span_ids": ["test_importorskip_dev_module", "test_importorskip_imports_last_module_part"], "tokens": 167}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_importorskip_imports_last_module_part():\n ospath = pytest.importorskip(\"os.path\")\n assert os.path == ospath\n\n\ndef test_importorskip_dev_module(monkeypatch):\n 
try:\n mod = types.ModuleType(\"mockmodule\")\n mod.__version__ = \"0.13.0.dev-43290\"\n monkeypatch.setitem(sys.modules, \"mockmodule\", mod)\n mod2 = pytest.importorskip(\"mockmodule\", minversion=\"0.12.0\")\n assert mod2 == mod\n with pytest.raises(pytest.skip.Exception):\n pytest.importorskip(\"mockmodule1\", minversion=\"0.14.0\")\n except pytest.skip.Exception:\n print(_pytest._code.ExceptionInfo.from_current())\n pytest.fail(\"spurious skip\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_module_level_test_pytest_cmdline_main.assert_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_importorskip_module_level_test_pytest_cmdline_main.assert_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 744, "end_line": 790, "span_ids": ["test_importorskip_module_level", "test_importorskip_custom_reason", "test_pytest_cmdline_main"], "tokens": 300}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_importorskip_module_level(testdir):\n \"\"\"importorskip must be able to skip entire modules when used at module level\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n foobarbaz = pytest.importorskip(\"foobarbaz\")\n\n def test_foo():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*collected 0 items / 1 skipped*\"])\n\n\ndef test_importorskip_custom_reason(testdir):\n \"\"\"make sure custom reasons are used\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n foobarbaz = pytest.importorskip(\"foobarbaz2\", reason=\"just because\")\n\n def test_foo():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines([\"*just because*\"])\n result.stdout.fnmatch_lines([\"*collected 0 items / 1 skipped*\"])\n\n\ndef test_pytest_cmdline_main(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_hello():\n assert 1\n if __name__ == '__main__':\n pytest.cmdline.main([__file__])\n \"\"\"\n )\n import subprocess\n\n popen = subprocess.Popen([sys.executable, str(p)], stdout=subprocess.PIPE)\n popen.communicate()\n ret = popen.wait()\n assert ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_unicode_in_longrepr_test_unicode_in_longrepr.assert_UnicodeEncodeErro": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_unicode_in_longrepr_test_unicode_in_longrepr.assert_UnicodeEncodeErro", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 793, "end_line": 814, "span_ids": ["test_unicode_in_longrepr"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unicode_in_longrepr(testdir):\n testdir.makeconftest(\n \"\"\"\n # -*- coding: utf-8 -*-\n import pytest\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_makereport():\n outcome = yield\n rep = outcome.get_result()\n if rep.when == \"call\":\n rep.longrepr = u'\u00e4'\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_out():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.ret == 1\n assert \"UnicodeEncodeError\" not in result.stderr.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_failure_in_setup_test_makereport_getsource.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_failure_in_setup_test_makereport_getsource.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 817, "end_line": 840, "span_ids": ["test_makereport_getsource", "test_failure_in_setup"], "tokens": 140}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_failure_in_setup(testdir):\n testdir.makepyfile(\n \"\"\"\n def setup_module():\n 0/0\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--tb=line\")\n assert \"def setup_module\" not in result.stdout.str()\n\n\ndef test_makereport_getsource(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n if False: pass\n else: assert False\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"INTERNALERROR\" not in result.stdout.str()\n result.stdout.fnmatch_lines([\"*else: assert False*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_makereport_getsource_dynamic_code_test_makereport_getsource_dynamic_code.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_makereport_getsource_dynamic_code_test_makereport_getsource_dynamic_code.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 843, "end_line": 871, "span_ids": ["test_makereport_getsource_dynamic_code"], "tokens": 193}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_makereport_getsource_dynamic_code(testdir, monkeypatch):\n \"\"\"Test 
that exception in dynamically generated code doesn't break getting the source line.\"\"\"\n import inspect\n\n original_findsource = inspect.findsource\n\n def findsource(obj, *args, **kwargs):\n # Can be triggered by dynamically created functions\n if obj.__name__ == \"foo\":\n raise IndexError()\n return original_findsource(obj, *args, **kwargs)\n\n monkeypatch.setattr(inspect, \"findsource\", findsource)\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def foo(missing):\n pass\n\n def test_fix(foo):\n assert False\n \"\"\"\n )\n result = testdir.runpytest(\"-vv\")\n assert \"INTERNALERROR\" not in result.stdout.str()\n result.stdout.fnmatch_lines([\"*test_fix*\", \"*fixture*'missing'*not found*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_store_except_info_on_error_test_store_except_info_on_error.None_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_store_except_info_on_error_test_store_except_info_on_error.None_5", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 874, "end_line": 901, "span_ids": ["test_store_except_info_on_error.ItemMightRaise:2", "test_store_except_info_on_error", "test_store_except_info_on_error.ItemMightRaise"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_store_except_info_on_error():\n \"\"\" Test that upon test failure, the exception info is stored on\n sys.last_traceback and friends.\n \"\"\"\n # Simulate item that might raise a specific exception, depending on `raise_error` class var\n class ItemMightRaise(object):\n nodeid = \"item_that_raises\"\n raise_error = True\n\n def runtest(self):\n if self.raise_error:\n raise IndexError(\"TEST\")\n\n try:\n runner.pytest_runtest_call(ItemMightRaise())\n except IndexError:\n pass\n # Check that exception info is stored on sys\n assert sys.last_type is IndexError\n assert sys.last_value.args[0] == \"TEST\"\n assert sys.last_traceback\n\n # The next run should clear the exception info stored by the previous run\n ItemMightRaise.raise_error = False\n runner.pytest_runtest_call(ItemMightRaise())\n assert sys.last_type is None\n assert sys.last_value is None\n assert sys.last_traceback is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_current_test_env_var_test_current_test_env_var.assert_PYTEST_CURRENT_TE": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_test_current_test_env_var_test_current_test_env_var.assert_PYTEST_CURRENT_TE", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 904, "end_line": 933, "span_ids": ["test_current_test_env_var"], "tokens": 237}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_current_test_env_var(testdir, monkeypatch):\n pytest_current_test_vars = []\n monkeypatch.setattr(\n sys, \"pytest_current_test_vars\", pytest_current_test_vars, raising=False\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n import sys\n import os\n\n @pytest.fixture\n def fix():\n sys.pytest_current_test_vars.append(('setup', os.environ['PYTEST_CURRENT_TEST']))\n yield\n sys.pytest_current_test_vars.append(('teardown', os.environ['PYTEST_CURRENT_TEST']))\n\n def test(fix):\n sys.pytest_current_test_vars.append(('call', os.environ['PYTEST_CURRENT_TEST']))\n \"\"\"\n )\n result = testdir.runpytest_inprocess()\n assert result.ret == 0\n test_id = \"test_current_test_env_var.py::test\"\n assert pytest_current_test_vars == [\n (\"setup\", test_id + \" (setup)\"),\n (\"call\", test_id + \" (call)\"),\n (\"teardown\", test_id + \" (teardown)\"),\n ]\n assert \"PYTEST_CURRENT_TEST\" not in os.environ", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents_TestReportContents.test_longreprtext_failure.assert_assert_1_4_in": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents_TestReportContents.test_longreprtext_failure.assert_assert_1_4_in", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", "file_type": "text/x-python", "category": "test", "start_line": 936, "end_line": 963, "span_ids": ["TestReportContents", "TestReportContents.test_longreprtext_failure", "TestReportContents.getrunner", "TestReportContents.test_longreprtext_pass"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportContents(object):\n \"\"\"\n Test user-level API of ``TestReport`` objects.\n \"\"\"\n\n def getrunner(self):\n return lambda item: runner.runtestprotocol(item, log=False)\n\n def test_longreprtext_pass(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n rep = reports[1]\n assert rep.longreprtext == \"\"\n\n def test_longreprtext_failure(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n x = 1\n assert x == 4\n \"\"\"\n )\n rep = reports[1]\n assert \"assert 1 == 4\" in rep.longreprtext", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents.test_captured_text_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner.py_TestReportContents.test_captured_text_", "embedding": null, "metadata": {"file_path": "testing/test_runner.py", "file_name": "test_runner.py", 
"file_type": "text/x-python", "category": "test", "start_line": 965, "end_line": 1005, "span_ids": ["TestReportContents.test_no_captured_text", "TestReportContents.test_captured_text"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestReportContents(object):\n\n def test_captured_text(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n import pytest\n import sys\n\n @pytest.fixture\n def fix():\n sys.stdout.write('setup: stdout\\\\n')\n sys.stderr.write('setup: stderr\\\\n')\n yield\n sys.stdout.write('teardown: stdout\\\\n')\n sys.stderr.write('teardown: stderr\\\\n')\n assert 0\n\n def test_func(fix):\n sys.stdout.write('call: stdout\\\\n')\n sys.stderr.write('call: stderr\\\\n')\n assert 0\n \"\"\"\n )\n setup, call, teardown = reports\n assert setup.capstdout == \"setup: stdout\\n\"\n assert call.capstdout == \"setup: stdout\\ncall: stdout\\n\"\n assert teardown.capstdout == \"setup: stdout\\ncall: stdout\\nteardown: stdout\\n\"\n\n assert setup.capstderr == \"setup: stderr\\n\"\n assert call.capstderr == \"setup: stderr\\ncall: stderr\\n\"\n assert teardown.capstderr == \"setup: stderr\\ncall: stderr\\nteardown: stderr\\n\"\n\n def test_no_captured_text(self, testdir):\n reports = testdir.runitem(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n rep = reports[1]\n assert rep.capstdout == \"\"\n assert rep.capstderr == \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py___test_module_and_function_setup.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py___test_module_and_function_setup.None_1", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 42, "span_ids": ["imports", "docstring", "test_module_and_function_setup"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\n test correct setup/teardowns at\n module, class, and instance level\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\n\n\ndef test_module_and_function_setup(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n modlevel = []\n def setup_module(module):\n assert not modlevel\n module.modlevel.append(42)\n\n def teardown_module(module):\n modlevel.pop()\n\n def setup_function(function):\n function.answer = 17\n\n def teardown_function(function):\n del function.answer\n\n def test_modlevel():\n assert modlevel[0] == 42\n assert test_modlevel.answer == 17\n\n class TestFromClass(object):\n def test_module(self):\n assert modlevel[0] == 42\n assert not hasattr(test_modlevel, 'answer')\n \"\"\"\n )\n rep = 
reprec.matchreport(\"test_modlevel\")\n assert rep.passed\n rep = reprec.matchreport(\"test_module\")\n assert rep.passed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_module_setup_failure_no_teardown_test_setup_function_failure_no_teardown.assert_calls_0_item_modu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_module_setup_failure_no_teardown_test_setup_function_failure_no_teardown.assert_calls_0_item_modu", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 45, "end_line": 81, "span_ids": ["test_module_setup_failure_no_teardown", "test_setup_function_failure_no_teardown"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_module_setup_failure_no_teardown(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n values = []\n def setup_module(module):\n values.append(1)\n 0/0\n\n def test_nothing():\n pass\n\n def teardown_module(module):\n values.append(2)\n \"\"\"\n )\n reprec.assertoutcome(failed=1)\n calls = reprec.getcalls(\"pytest_runtest_setup\")\n assert calls[0].item.module.values == [1]\n\n\ndef test_setup_function_failure_no_teardown(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n modlevel = []\n def setup_function(function):\n modlevel.append(1)\n 0/0\n\n def teardown_function(module):\n modlevel.append(2)\n\n def test_func():\n pass\n \"\"\"\n )\n calls = reprec.getcalls(\"pytest_runtest_setup\")\n assert calls[0].item.module.modlevel == [1]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_test_class_setup.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_test_class_setup.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 107, "span_ids": ["test_class_setup"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_class_setup(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n class TestSimpleClassSetup(object):\n clslevel = []\n def setup_class(cls):\n cls.clslevel.append(23)\n\n def teardown_class(cls):\n cls.clslevel.pop()\n\n def test_classlevel(self):\n assert self.clslevel[0] == 23\n\n class TestInheritedClassSetupStillWorks(TestSimpleClassSetup):\n def test_classlevel_anothertime(self):\n assert 
self.clslevel == [23]\n\n def test_cleanup():\n assert not TestSimpleClassSetup.clslevel\n assert not TestInheritedClassSetupStillWorks.clslevel\n \"\"\"\n )\n reprec.assertoutcome(passed=1 + 2 + 1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_failure_no_teardown_test_class_setup_failure_no_teardown.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_class_setup_failure_no_teardown_test_class_setup_failure_no_teardown.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 110, "end_line": 128, "span_ids": ["test_class_setup_failure_no_teardown"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_class_setup_failure_no_teardown(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n class TestSimpleClassSetup(object):\n clslevel = []\n def setup_class(cls):\n 0/0\n\n def teardown_class(cls):\n cls.clslevel.append(1)\n\n def test_classlevel(self):\n pass\n\n def test_cleanup():\n assert not TestSimpleClassSetup.clslevel\n \"\"\"\n )\n reprec.assertoutcome(failed=1, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_test_method_setup.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_test_method_setup.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 131, "end_line": 147, "span_ids": ["test_method_setup"], "tokens": 105}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_method_setup(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n class TestSetupMethod(object):\n def setup_method(self, meth):\n self.methsetup = meth\n def teardown_method(self, meth):\n del self.methsetup\n\n def test_some(self):\n assert self.methsetup == self.test_some\n\n def test_other(self):\n assert self.methsetup == self.test_other\n \"\"\"\n )\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_failure_no_teardown_test_method_setup_failure_no_teardown.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_failure_no_teardown_test_method_setup_failure_no_teardown.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 150, "end_line": 169, "span_ids": ["test_method_setup_failure_no_teardown"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_method_setup_failure_no_teardown(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n class TestMethodSetup(object):\n clslevel = []\n def setup_method(self, method):\n self.clslevel.append(1)\n 0/0\n\n def teardown_method(self, method):\n self.clslevel.append(2)\n\n def test_method(self):\n pass\n\n def test_cleanup():\n assert TestMethodSetup.clslevel == [1]\n \"\"\"\n )\n reprec.assertoutcome(failed=1, passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_uses_fresh_instances_test_setup_fails_again_on_all_tests.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_method_setup_uses_fresh_instances_test_setup_fails_again_on_all_tests.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 172, "end_line": 216, "span_ids": ["test_method_setup_uses_fresh_instances", "test_setup_fails_again_on_all_tests", "test_setup_that_skips_calledagain"], "tokens": 243}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_method_setup_uses_fresh_instances(testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n class TestSelfState1(object):\n memory = []\n def test_hello(self):\n self.memory.append(self)\n\n def test_afterhello(self):\n assert self != self.memory[0]\n \"\"\"\n )\n reprec.assertoutcome(passed=2, failed=0)\n\n\ndef test_setup_that_skips_calledagain(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_module(mod):\n pytest.skip(\"x\")\n def test_function1():\n pass\n def test_function2():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n reprec.assertoutcome(skipped=2)\n\n\ndef test_setup_fails_again_on_all_tests(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_module(mod):\n raise ValueError(42)\n def test_function1():\n pass\n def test_function2():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n reprec.assertoutcome(failed=2)", 
"start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_funcarg_setup_when_outer_scope_fails_test_setup_funcarg_setup_when_outer_scope_fails.assert_xyz43_not_in_res": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_funcarg_setup_when_outer_scope_fails_test_setup_funcarg_setup_when_outer_scope_fails.assert_xyz43_not_in_res", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 219, "end_line": 244, "span_ids": ["test_setup_funcarg_setup_when_outer_scope_fails"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_funcarg_setup_when_outer_scope_fails(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_module(mod):\n raise ValueError(42)\n @pytest.fixture\n def hello(request):\n raise ValueError(\"xyz43\")\n def test_function1(hello):\n pass\n def test_function2(hello):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines(\n [\n \"*function1*\",\n \"*ValueError*42*\",\n \"*function2*\",\n \"*ValueError*42*\",\n \"*2 error*\",\n ]\n )\n assert \"xyz43\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_teardown_function_level_with_optional_argument_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_runner_xunit.py_test_setup_teardown_function_level_with_optional_argument_", "embedding": null, "metadata": {"file_path": "testing/test_runner_xunit.py", "file_name": "test_runner_xunit.py", "file_type": "text/x-python", "category": "test", "start_line": 247, "end_line": 300, "span_ids": ["test_setup_teardown_function_level_with_optional_argument"], "tokens": 350}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"arg\", [\"\", \"arg\"])\ndef test_setup_teardown_function_level_with_optional_argument(\n testdir, monkeypatch, arg\n):\n \"\"\"parameter to setup/teardown xunit-style functions parameter is now optional (#1728).\"\"\"\n import sys\n\n trace_setups_teardowns = []\n monkeypatch.setattr(\n sys, \"trace_setups_teardowns\", trace_setups_teardowns, raising=False\n )\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n import sys\n\n trace = sys.trace_setups_teardowns.append\n\n def setup_module({arg}): trace('setup_module')\n def teardown_module({arg}): trace('teardown_module')\n\n def setup_function({arg}): trace('setup_function')\n def teardown_function({arg}): trace('teardown_function')\n\n def 
test_function_1(): pass\n def test_function_2(): pass\n\n class Test(object):\n def setup_method(self, {arg}): trace('setup_method')\n def teardown_method(self, {arg}): trace('teardown_method')\n\n def test_method_1(self): pass\n def test_method_2(self): pass\n \"\"\".format(\n arg=arg\n )\n )\n result = testdir.inline_run(p)\n result.assertoutcome(passed=4)\n\n expected = [\n \"setup_module\",\n \"setup_function\",\n \"teardown_function\",\n \"setup_function\",\n \"teardown_function\",\n \"setup_method\",\n \"teardown_method\",\n \"setup_method\",\n \"teardown_method\",\n \"teardown_module\",\n ]\n assert trace_setups_teardowns == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_from___future___import_ab_SessionTests.test_basic_testitem_events._assert_colreports_1_re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_from___future___import_ab_SessionTests.test_basic_testitem_events._assert_colreports_1_re", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 40, "span_ids": ["SessionTests", "imports", "SessionTests.test_basic_testitem_events"], "tokens": 287}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\n\n\nclass SessionTests(object):\n def test_basic_testitem_events(self, testdir):\n tfile = testdir.makepyfile(\n \"\"\"\n def test_one():\n pass\n def test_one_one():\n assert 0\n def test_other():\n raise ValueError(23)\n class TestClass(object):\n def test_two(self, someargs):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(tfile)\n passed, skipped, failed = reprec.listoutcomes()\n assert len(skipped) == 0\n assert len(passed) == 1\n assert len(failed) == 3\n\n def end(x):\n return x.nodeid.split(\"::\")[-1]\n\n assert end(failed[0]) == \"test_one_one\"\n assert end(failed[1]) == \"test_other\"\n itemstarted = reprec.getcalls(\"pytest_itemcollected\")\n assert len(itemstarted) == 4\n # XXX check for failing funcarg setup\n # colreports = reprec.getcalls(\"pytest_collectreport\")\n # assert len(colreports) == 4\n # assert colreports[1].report.failed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_nested_import_error_SessionTests.test_nested_import_error.assert_out_find_does_not": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_nested_import_error_SessionTests.test_nested_import_error.assert_out_find_does_not", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 58, 
"span_ids": ["SessionTests.test_nested_import_error"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SessionTests(object):\n\n def test_nested_import_error(self, testdir):\n tfile = testdir.makepyfile(\n \"\"\"\n import import_fails\n def test_this():\n assert import_fails.a == 1\n \"\"\",\n import_fails=\"\"\"\n import does_not_work\n a = 1\n \"\"\",\n )\n reprec = testdir.inline_run(tfile)\n values = reprec.getfailedcollections()\n assert len(values) == 1\n out = str(values[0].longrepr)\n assert out.find(\"does_not_work\") != -1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_raises_output_SessionTests.test_maxfail.assert_passed_skipped_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_raises_output_SessionTests.test_maxfail.assert_passed_skipped_", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 60, "end_line": 103, "span_ids": ["SessionTests.test_exit_first_problem", "SessionTests.test_syntax_error_module", "SessionTests.test_maxfail", "SessionTests.test_raises_output"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SessionTests(object):\n\n def test_raises_output(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n import pytest\n def test_raises_doesnt():\n pytest.raises(ValueError, int, \"3\")\n \"\"\"\n )\n passed, skipped, failed = reprec.listoutcomes()\n assert len(failed) == 1\n out = failed[0].longrepr.reprcrash.message\n assert \"DID NOT RAISE\" in out\n\n def test_syntax_error_module(self, testdir):\n reprec = testdir.inline_runsource(\"this is really not python\")\n values = reprec.getfailedcollections()\n assert len(values) == 1\n out = str(values[0].longrepr)\n assert out.find(str(\"not python\")) != -1\n\n def test_exit_first_problem(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_one(): assert 0\n def test_two(): assert 0\n \"\"\",\n \"--exitfirst\",\n )\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == 1\n assert passed == skipped == 0\n\n def test_maxfail(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n def test_one(): assert 0\n def test_two(): assert 0\n def test_three(): assert 0\n \"\"\",\n \"--maxfail=2\",\n )\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == 2\n assert passed == skipped == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_SessionTests.test_broken_repr.assert_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_SessionTests.test_broken_repr.assert_", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 105, "end_line": 135, "span_ids": ["SessionTests.test_broken_repr"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SessionTests(object):\n\n def test_broken_repr(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n class BrokenRepr1(object):\n foo=0\n def __repr__(self):\n raise Exception(\"Ha Ha fooled you, I'm a broken repr().\")\n\n class TestBrokenClass(object):\n def test_explicit_bad_repr(self):\n t = BrokenRepr1()\n with pytest.raises(Exception, match=\"I'm a broken repr\"):\n repr(t)\n\n def test_implicit_bad_repr1(self):\n t = BrokenRepr1()\n assert t.foo == 1\n\n \"\"\"\n )\n reprec = testdir.inline_run(p)\n passed, skipped, failed = reprec.listoutcomes()\n assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)\n out = failed[0].longrepr.reprcrash.message\n assert (\n out.find(\n \"\"\"[Exception(\"Ha Ha fooled you, I'm a broken repr().\") raised in repr()]\"\"\"\n )\n != -1\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_with_showlocals_verbose_SessionTests.test_broken_repr_with_showlocals_verbose.assert_repr_locals_lines_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_broken_repr_with_showlocals_verbose_SessionTests.test_broken_repr_with_showlocals_verbose.assert_repr_locals_lines_", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 137, "end_line": 159, "span_ids": ["SessionTests.test_broken_repr_with_showlocals_verbose"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SessionTests(object):\n\n def test_broken_repr_with_showlocals_verbose(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n class ObjWithErrorInRepr:\n def __repr__(self):\n raise NotImplementedError\n\n def test_repr_error():\n x = ObjWithErrorInRepr()\n assert x == \"value\"\n \"\"\"\n )\n reprec = testdir.inline_run(\"--showlocals\", \"-vv\", p)\n passed, skipped, failed = reprec.listoutcomes()\n assert (len(passed), len(skipped), len(failed)) == (0, 0, 1)\n entries = failed[0].longrepr.reprtraceback.reprentries\n assert len(entries) == 1\n repr_locals = entries[0].reprlocals\n assert repr_locals.lines\n assert 
len(repr_locals.lines) == 1\n assert repr_locals.lines[0].startswith(\n 'x = <[NotImplementedError(\"\") raised in repr()] ObjWithErrorInRepr'\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_skip_file_by_conftest_SessionTests.test_skip_file_by_conftest.assert_reports_0_skipped": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_SessionTests.test_skip_file_by_conftest_SessionTests.test_skip_file_by_conftest.assert_reports_0_skipped", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 161, "end_line": 178, "span_ids": ["SessionTests.test_skip_file_by_conftest"], "tokens": 137}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class SessionTests(object):\n\n def test_skip_file_by_conftest(self, testdir):\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n def pytest_collect_file():\n pytest.skip(\"intentional\")\n \"\"\",\n test_file=\"\"\"\n def test_one(): pass\n \"\"\",\n )\n try:\n reprec = testdir.inline_run(testdir.tmpdir)\n except pytest.skip.Exception: # pragma: no covers\n pytest.fail(\"wrong skipped caught\")\n reports = reprec.getreports(\"pytest_collectreport\")\n assert len(reports) == 1\n assert reports[0].skipped", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession_TestNewSession.test_order_of_execution.assert_passed_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession_TestNewSession.test_order_of_execution.assert_passed_7", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 181, "end_line": 206, "span_ids": ["TestNewSession.test_order_of_execution", "TestNewSession"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNewSession(SessionTests):\n def test_order_of_execution(self, testdir):\n reprec = testdir.inline_runsource(\n \"\"\"\n values = []\n def test_1():\n values.append(1)\n def test_2():\n values.append(2)\n def test_3():\n assert values == [1,2]\n class Testmygroup(object):\n reslist = values\n def test_1(self):\n self.reslist.append(1)\n def test_2(self):\n self.reslist.append(2)\n def test_3(self):\n self.reslist.append(3)\n def test_4(self):\n assert self.reslist == [1,2,1,2,3]\n \"\"\"\n )\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == skipped == 0\n assert passed == 7", "start_char_idx": null, "end_char_idx": null, 
"text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_collect_only_with_various_situations_TestNewSession.test_collect_only_with_various_situations.assert_len_colfail_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_collect_only_with_various_situations_TestNewSession.test_collect_only_with_various_situations.assert_len_colfail_1", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 208, "end_line": 234, "span_ids": ["TestNewSession.test_collect_only_with_various_situations"], "tokens": 217}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNewSession(SessionTests):\n\n def test_collect_only_with_various_situations(self, testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n def test_one():\n raise ValueError()\n\n class TestX(object):\n def test_method_one(self):\n pass\n\n class TestY(TestX):\n pass\n \"\"\",\n test_three=\"xxxdsadsadsadsa\",\n __init__=\"\",\n )\n reprec = testdir.inline_run(\"--collect-only\", p.dirpath())\n\n itemstarted = reprec.getcalls(\"pytest_itemcollected\")\n assert len(itemstarted) == 3\n assert not reprec.getreports(\"pytest_runtest_logreport\")\n started = reprec.getcalls(\"pytest_collectstart\")\n finished = reprec.getreports(\"pytest_collectreport\")\n assert len(started) == len(finished)\n assert len(started) == 8\n colfail = [x for x in finished if x.failed]\n assert len(colfail) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_minus_x_import_error_TestNewSession.test_minus_x_overridden_by_maxfail.assert_len_colfail_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_TestNewSession.test_minus_x_import_error_TestNewSession.test_minus_x_overridden_by_maxfail.assert_len_colfail_2", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 236, "end_line": 250, "span_ids": ["TestNewSession.test_minus_x_import_error", "TestNewSession.test_minus_x_overridden_by_maxfail"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestNewSession(SessionTests):\n\n def test_minus_x_import_error(self, testdir):\n testdir.makepyfile(__init__=\"\")\n testdir.makepyfile(test_one=\"xxxx\", test_two=\"yyyy\")\n reprec = testdir.inline_run(\"-x\", testdir.tmpdir)\n finished = reprec.getreports(\"pytest_collectreport\")\n colfail = [x for x in finished if x.failed]\n assert len(colfail) == 
1\n\n def test_minus_x_overridden_by_maxfail(self, testdir):\n testdir.makepyfile(__init__=\"\")\n testdir.makepyfile(test_one=\"xxxx\", test_two=\"yyyy\", test_third=\"zzz\")\n reprec = testdir.inline_run(\"-x\", \"--maxfail=2\", testdir.tmpdir)\n finished = reprec.getreports(\"pytest_collectreport\")\n colfail = [x for x in finished if x.failed]\n assert len(colfail) == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_plugin_specify_test_exclude.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_plugin_specify_test_exclude.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 253, "end_line": 276, "span_ids": ["test_plugin_specify", "test_plugin_already_exists", "test_exclude"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_plugin_specify(testdir):\n with pytest.raises(ImportError):\n testdir.parseconfig(\"-p\", \"nqweotexistent\")\n # pytest.raises(ImportError,\n # \"config.do_configure(config)\"\n # )\n\n\ndef test_plugin_already_exists(testdir):\n config = testdir.parseconfig(\"-p\", \"terminal\")\n assert config.option.plugins == [\"terminal\"]\n config._do_configure()\n config._ensure_unconfigure()\n\n\ndef test_exclude(testdir):\n hellodir = testdir.mkdir(\"hello\")\n hellodir.join(\"test_hello.py\").write(\"x y syntaxerror\")\n hello2dir = testdir.mkdir(\"hello2\")\n hello2dir.join(\"test_hello2.py\").write(\"x y syntaxerror\")\n testdir.makepyfile(test_ok=\"def test_pass(): pass\")\n result = testdir.runpytest(\"--ignore=hello\", \"--ignore=hello2\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_exclude_glob_test_exclude_glob.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_exclude_glob_test_exclude_glob.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 291, "span_ids": ["test_exclude_glob"], "tokens": 176}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_exclude_glob(testdir):\n hellodir = testdir.mkdir(\"hello\")\n hellodir.join(\"test_hello.py\").write(\"x y syntaxerror\")\n hello2dir = testdir.mkdir(\"hello2\")\n hello2dir.join(\"test_hello2.py\").write(\"x y 
syntaxerror\")\n hello3dir = testdir.mkdir(\"hallo3\")\n hello3dir.join(\"test_hello3.py\").write(\"x y syntaxerror\")\n subdir = testdir.mkdir(\"sub\")\n subdir.join(\"test_hello4.py\").write(\"x y syntaxerror\")\n testdir.makepyfile(test_ok=\"def test_pass(): pass\")\n result = testdir.runpytest(\"--ignore-glob=*h[ea]llo*\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_deselect_test_sessionfinish_with_start.assert_res_ret_EXIT_NO": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_deselect_test_sessionfinish_with_start.assert_res_ret_EXIT_NO", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 294, "end_line": 337, "span_ids": ["test_deselect", "test_sessionfinish_with_start"], "tokens": 284}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_deselect(testdir):\n testdir.makepyfile(\n test_a=\"\"\"\n import pytest\n\n def test_a1(): pass\n\n @pytest.mark.parametrize('b', range(3))\n def test_a2(b): pass\n\n class TestClass:\n def test_c1(self): pass\n\n def test_c2(self): pass\n \"\"\"\n )\n result = testdir.runpytest(\n \"-v\",\n \"--deselect=test_a.py::test_a2[1]\",\n \"--deselect=test_a.py::test_a2[2]\",\n \"--deselect=test_a.py::TestClass::test_c1\",\n )\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*3 passed, 3 deselected*\"])\n for line in result.stdout.lines:\n assert not line.startswith((\"test_a.py::test_a2[1]\", \"test_a.py::test_a2[2]\"))\n\n\ndef test_sessionfinish_with_start(testdir):\n testdir.makeconftest(\n \"\"\"\n import os\n values = []\n def pytest_sessionstart():\n values.append(os.getcwd())\n os.chdir(\"..\")\n\n def pytest_sessionfinish():\n assert values[0] == os.getcwd()\n\n \"\"\"\n )\n res = testdir.runpytest(\"--collect-only\")\n assert res.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_rootdir_option_arg_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_session.py_test_rootdir_option_arg_", "embedding": null, "metadata": {"file_path": "testing/test_session.py", "file_name": "test_session.py", "file_type": "text/x-python", "category": "test", "start_line": 340, "end_line": 378, "span_ids": ["test_rootdir_option_arg", "test_rootdir_wrong_option_arg"], "tokens": 253}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"path\", [\"root\", \"{relative}/root\", 
\"{environment}/root\"])\ndef test_rootdir_option_arg(testdir, monkeypatch, path):\n monkeypatch.setenv(\"PY_ROOTDIR_PATH\", str(testdir.tmpdir))\n path = path.format(relative=str(testdir.tmpdir), environment=\"$PY_ROOTDIR_PATH\")\n\n rootdir = testdir.mkdir(\"root\")\n rootdir.mkdir(\"tests\")\n testdir.makepyfile(\n \"\"\"\n import os\n def test_one():\n assert 1\n \"\"\"\n )\n\n result = testdir.runpytest(\"--rootdir={}\".format(path))\n result.stdout.fnmatch_lines(\n [\n \"*rootdir: {}/root\".format(testdir.tmpdir),\n \"root/test_rootdir_option_arg.py *\",\n \"*1 passed*\",\n ]\n )\n\n\ndef test_rootdir_wrong_option_arg(testdir):\n testdir.makepyfile(\n \"\"\"\n import os\n def test_one():\n assert 1\n \"\"\"\n )\n\n result = testdir.runpytest(\"--rootdir=wrong_dir\")\n result.stderr.fnmatch_lines(\n [\"*Directory *wrong_dir* not found. Check your '--rootdir' option.*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_unicode_TestEvaluator.test_marked_one_arg_unicode.assert_expl_condition": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_unicode_TestEvaluator.test_marked_one_arg_unicode.assert_expl_condition", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 52, "end_line": 66, "span_ids": ["TestEvaluator.test_marked_one_arg_unicode"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEvaluator(object):\n\n @pytest.mark.skipif(\"sys.version_info[0] >= 3\")\n def test_marked_one_arg_unicode(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xyz(u\"hasattr(os, 'sep')\")\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"xyz\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"condition: hasattr(os, 'sep')\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_with_reason_TestEvaluator.test_marked_one_arg_with_reason.assert_ev_get_attr_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_with_reason_TestEvaluator.test_marked_one_arg_with_reason.assert_ev_get_attr_", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 68, "end_line": 82, "span_ids": ["TestEvaluator.test_marked_one_arg_with_reason"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEvaluator(object):\n\n def test_marked_one_arg_with_reason(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xyz(\"hasattr(os, 'sep')\", attr=2, reason=\"hello world\")\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"xyz\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"hello world\"\n assert ev.get(\"attr\") == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice_TestEvaluator.test_marked_one_arg_twice.for_i_in_range_0_2_.assert_expl_condition": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice_TestEvaluator.test_marked_one_arg_twice.for_i_in_range_0_2_.assert_expl_condition", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 84, "end_line": 104, "span_ids": ["TestEvaluator.test_marked_one_arg_twice"], "tokens": 168}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEvaluator(object):\n\n def test_marked_one_arg_twice(self, testdir):\n lines = [\n \"\"\"@pytest.mark.skipif(\"not hasattr(os, 'murks')\")\"\"\",\n \"\"\"@pytest.mark.skipif(\"hasattr(os, 'murks')\")\"\"\",\n ]\n for i in range(0, 2):\n item = testdir.getitem(\n \"\"\"\n import pytest\n %s\n %s\n def test_func():\n pass\n \"\"\"\n % (lines[i], lines[(i + 1) % 2])\n )\n ev = MarkEvaluator(item, \"skipif\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"condition: not hasattr(os, 'murks')\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice2_TestEvaluator.test_marked_one_arg_twice2.assert_expl_condition": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_one_arg_twice2_TestEvaluator.test_marked_one_arg_twice2.assert_expl_condition", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 106, "end_line": 120, "span_ids": ["TestEvaluator.test_marked_one_arg_twice2"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEvaluator(object):\n\n def test_marked_one_arg_twice2(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.skipif(\"hasattr(os, 'murks')\")\n 
@pytest.mark.skipif(\"not hasattr(os, 'murks')\")\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"skipif\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"condition: not hasattr(os, 'murks')\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_skip_with_not_string_TestEvaluator.test_skipif_class.assert_expl_condition": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestEvaluator.test_marked_skip_with_not_string_TestEvaluator.test_skipif_class.assert_expl_condition", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 122, "end_line": 152, "span_ids": ["TestEvaluator.test_marked_skip_with_not_string", "TestEvaluator.test_skipif_class"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestEvaluator(object):\n\n def test_marked_skip_with_not_string(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.skipif(False)\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"skipif\")\n exc = pytest.raises(pytest.fail.Exception, ev.istrue)\n assert (\n \"\"\"Failed: you need to specify reason=STRING when using booleans as conditions.\"\"\"\n in exc.value.msg\n )\n\n def test_skipif_class(self, testdir):\n item, = testdir.getitems(\n \"\"\"\n import pytest\n class TestClass(object):\n pytestmark = pytest.mark.skipif(\"config._hackxyz\")\n def test_func(self):\n pass\n \"\"\"\n )\n item.config._hackxyz = 3\n ev = MarkEvaluator(item, \"skipif\")\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"condition: config._hackxyz\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail_TestXFail.test_xfail_simple.assert_callreport_wasxfai": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail_TestXFail.test_xfail_simple.assert_callreport_wasxfai", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 155, "end_line": 171, "span_ids": ["TestXFail", "TestXFail.test_xfail_simple"], "tokens": 120}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_xfail_simple(self, testdir, strict):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xfail(strict=%s)\n def test_func():\n assert 0\n \"\"\"\n % 
strict\n )\n reports = runtestprotocol(item, log=False)\n assert len(reports) == 3\n callreport = reports[1]\n assert callreport.skipped\n assert callreport.wasxfail == \"\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_TestXFail.test_xfail_using_platform.assert_callreport_wasxfai": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_TestXFail.test_xfail_using_platform.assert_callreport_wasxfai", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 173, "end_line": 203, "span_ids": ["TestXFail.test_xfail_using_platform", "TestXFail.test_xfail_xpassed"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_xfail_xpassed(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xfail(reason=\"this is an xfail\")\n def test_func():\n assert 1\n \"\"\"\n )\n reports = runtestprotocol(item, log=False)\n assert len(reports) == 3\n callreport = reports[1]\n assert callreport.passed\n assert callreport.wasxfail == \"this is an xfail\"\n\n def test_xfail_using_platform(self, testdir):\n \"\"\"\n Verify that platform can be used with xfail statements.\n \"\"\"\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xfail(\"platform.platform() == platform.platform()\")\n def test_func():\n assert 0\n \"\"\"\n )\n reports = runtestprotocol(item, log=False)\n assert len(reports) == 3\n callreport = reports[1]\n assert callreport.wasxfail", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_strict_TestXFail.test_xfail_xpassed_strict.assert_not_hasattr_callre": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_xpassed_strict_TestXFail.test_xfail_xpassed_strict.assert_not_hasattr_callre", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 205, "end_line": 219, "span_ids": ["TestXFail.test_xfail_xpassed_strict"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_xfail_xpassed_strict(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xfail(strict=True, reason=\"nope\")\n def test_func():\n assert 1\n \"\"\"\n )\n reports = runtestprotocol(item, log=False)\n assert len(reports) == 3\n 
callreport = reports[1]\n assert callreport.failed\n assert callreport.longrepr == \"[XPASS(strict)] nope\"\n assert not hasattr(callreport, \"wasxfail\")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_run_anyway_TestXFail.test_xfail_not_report_default._": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_run_anyway_TestXFail.test_xfail_not_report_default._", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 221, "end_line": 264, "span_ids": ["TestXFail.test_xfail_not_report_default", "TestXFail.test_xfail_run_anyway", "TestXFail.test_xfail_evalfalse_but_fails"], "tokens": 293}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_xfail_run_anyway(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail\n def test_func():\n assert 0\n def test_func2():\n pytest.xfail(\"hello\")\n \"\"\"\n )\n result = testdir.runpytest(\"--runxfail\")\n result.stdout.fnmatch_lines(\n [\"*def test_func():*\", \"*assert 0*\", \"*1 failed*1 pass*\"]\n )\n\n def test_xfail_evalfalse_but_fails(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xfail('False')\n def test_func():\n assert 0\n \"\"\"\n )\n reports = runtestprotocol(item, log=False)\n callreport = reports[1]\n assert callreport.failed\n assert not hasattr(callreport, \"wasxfail\")\n assert \"xfail\" in callreport.keywords\n\n def test_xfail_not_report_default(self, testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n @pytest.mark.xfail\n def test_this():\n assert 0\n \"\"\"\n )\n testdir.runpytest(p, \"-v\")\n # result.stdout.fnmatch_lines([\n # \"*HINT*use*-r*\"\n # ])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_xfail_reporting_TestXFail.test_xfail_not_run_xfail_reporting.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_xfail_reporting_TestXFail.test_xfail_not_run_xfail_reporting.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 266, "end_line": 290, "span_ids": ["TestXFail.test_xfail_not_run_xfail_reporting"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def 
test_xfail_not_run_xfail_reporting(self, testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n @pytest.mark.xfail(run=False, reason=\"noway\")\n def test_this():\n assert 0\n @pytest.mark.xfail(\"True\", run=False)\n def test_this_true():\n assert 0\n @pytest.mark.xfail(\"False\", run=False, reason=\"huh\")\n def test_this_false():\n assert 1\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rx\")\n result.stdout.fnmatch_lines(\n [\n \"*test_one*test_this*\",\n \"*NOTRUN*noway\",\n \"*test_one*test_this_true*\",\n \"*NOTRUN*condition:*True*\",\n \"*1 passed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_no_setup_run_TestXFail.test_xfail_xpass.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_not_run_no_setup_run_TestXFail.test_xfail_xpass.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 292, "end_line": 319, "span_ids": ["TestXFail.test_xfail_not_run_no_setup_run", "TestXFail.test_xfail_xpass"], "tokens": 211}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_xfail_not_run_no_setup_run(self, testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n @pytest.mark.xfail(run=False, reason=\"hello\")\n def test_this():\n assert 0\n def setup_module(mod):\n raise ValueError(42)\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rx\")\n result.stdout.fnmatch_lines(\n [\"*test_one*test_this*\", \"*NOTRUN*hello\", \"*1 xfailed*\"]\n )\n\n def test_xfail_xpass(self, testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n @pytest.mark.xfail\n def test_that():\n assert 1\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rX\")\n result.stdout.fnmatch_lines([\"*XPASS*test_that*\", \"*1 xpassed*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_TestXFail.test_xfail_imperative.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_TestXFail.test_xfail_imperative.None_2", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 321, "end_line": 334, "span_ids": ["TestXFail.test_xfail_imperative"], "tokens": 130}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class 
TestXFail(object):\n\n def test_xfail_imperative(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_this():\n pytest.xfail(\"hello\")\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*1 xfailed*\"])\n result = testdir.runpytest(p, \"-rx\")\n result.stdout.fnmatch_lines([\"*XFAIL*test_this*\", \"*reason:*hello*\"])\n result = testdir.runpytest(p, \"--runxfail\")\n result.stdout.fnmatch_lines([\"*1 pass*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_in_setup_function_TestXFail.test_xfail_imperative_in_setup_function.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_imperative_in_setup_function_TestXFail.test_xfail_imperative_in_setup_function.None_2", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 336, "end_line": 357, "span_ids": ["TestXFail.test_xfail_imperative_in_setup_function"], "tokens": 156}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_xfail_imperative_in_setup_function(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_function(function):\n pytest.xfail(\"hello\")\n\n def test_this():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*1 xfailed*\"])\n result = testdir.runpytest(p, \"-rx\")\n result.stdout.fnmatch_lines([\"*XFAIL*test_this*\", \"*reason:*hello*\"])\n result = testdir.runpytest(p, \"--runxfail\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *def test_this*\n *1 fail*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.xtest_dynamic_xfail_set_during_setup_TestXFail.test_dynamic_xfail_set_during_funcarg_setup.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.xtest_dynamic_xfail_set_during_setup_TestXFail.test_dynamic_xfail_set_during_funcarg_setup.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 359, "end_line": 400, "span_ids": ["TestXFail.xtest_dynamic_xfail_set_during_setup", "TestXFail.test_dynamic_xfail_no_run", "TestXFail.test_dynamic_xfail_set_during_funcarg_setup"], "tokens": 296}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def 
xtest_dynamic_xfail_set_during_setup(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_function(function):\n pytest.mark.xfail(function)\n def test_this():\n assert 0\n def test_that():\n assert 1\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rxX\")\n result.stdout.fnmatch_lines([\"*XFAIL*test_this*\", \"*XPASS*test_that*\"])\n\n def test_dynamic_xfail_no_run(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg(request):\n request.applymarker(pytest.mark.xfail(run=False))\n def test_this(arg):\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rxX\")\n result.stdout.fnmatch_lines([\"*XFAIL*test_this*\", \"*NOTRUN*\"])\n\n def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def arg(request):\n request.applymarker(pytest.mark.xfail)\n def test_this2(arg):\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*1 xfailed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_raises_TestXFail.test_xfail_raises.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_raises_TestXFail.test_xfail_raises.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 402, "end_line": 422, "span_ids": ["TestXFail.test_xfail_raises"], "tokens": 171}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n @pytest.mark.parametrize(\n \"expected, actual, matchline\",\n [\n (\"TypeError\", \"TypeError\", \"*1 xfailed*\"),\n (\"(AttributeError, TypeError)\", \"TypeError\", \"*1 xfailed*\"),\n (\"TypeError\", \"IndexError\", \"*1 failed*\"),\n (\"(AttributeError, TypeError)\", \"IndexError\", \"*1 failed*\"),\n ],\n )\n def test_xfail_raises(self, expected, actual, matchline, testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(raises=%s)\n def test_raises():\n raise %s()\n \"\"\"\n % (expected, actual)\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([matchline])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_sanity_TestXFail.test_strict_sanity.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_sanity_TestXFail.test_strict_sanity.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 424, "end_line": 438, "span_ids": ["TestXFail.test_strict_sanity"], "tokens": 125}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n def test_strict_sanity(self, testdir):\n \"\"\"sanity check for xfail(strict=True): a failing test should behave\n exactly like a normal xfail.\n \"\"\"\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(reason='unsupported feature', strict=True)\n def test_foo():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rxX\")\n result.stdout.fnmatch_lines([\"*XFAIL*\", \"*unsupported feature*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_TestXFail.test_strict_xfail.assert_testdir_tmpdir_joi": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_TestXFail.test_strict_xfail.assert_testdir_tmpdir_joi", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 440, "end_line": 465, "span_ids": ["TestXFail.test_strict_xfail"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_strict_xfail(self, testdir, strict):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.xfail(reason='unsupported feature', strict=%s)\n def test_foo():\n with open('foo_executed', 'w'): pass # make sure test executes\n \"\"\"\n % strict\n )\n result = testdir.runpytest(p, \"-rxX\")\n if strict:\n result.stdout.fnmatch_lines(\n [\"*test_foo*\", \"*XPASS(strict)*unsupported feature*\"]\n )\n else:\n result.stdout.fnmatch_lines(\n [\n \"*test_strict_xfail*\",\n \"XPASS test_strict_xfail.py::test_foo unsupported feature\",\n ]\n )\n assert result.ret == (1 if strict else 0)\n assert testdir.tmpdir.join(\"foo_executed\").isfile()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_condition_TestXFail.test_strict_xfail_condition.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_condition_TestXFail.test_strict_xfail_condition.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 467, "end_line": 481, "span_ids": ["TestXFail.test_strict_xfail_condition"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_strict_xfail_condition(self, testdir, strict):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)\n def test_foo():\n pass\n \"\"\"\n % strict\n )\n result = testdir.runpytest(p, \"-rxX\")\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_condition_keyword_TestXFail.test_xfail_condition_keyword.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_xfail_condition_keyword_TestXFail.test_xfail_condition_keyword.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 483, "end_line": 497, "span_ids": ["TestXFail.test_xfail_condition_keyword"], "tokens": 115}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n @pytest.mark.parametrize(\"strict\", [True, False])\n def test_xfail_condition_keyword(self, testdir, strict):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s)\n def test_foo():\n pass\n \"\"\"\n % strict\n )\n result = testdir.runpytest(p, \"-rxX\")\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_default_from_file_TestXFail.test_strict_xfail_default_from_file.assert_result_ret_1_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFail.test_strict_xfail_default_from_file_TestXFail.test_strict_xfail_default_from_file.assert_result_ret_1_i", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 499, "end_line": 519, "span_ids": ["TestXFail.test_strict_xfail_default_from_file"], "tokens": 163}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFail(object):\n\n @pytest.mark.parametrize(\"strict_val\", [\"true\", \"false\"])\n def test_strict_xfail_default_from_file(self, testdir, strict_val):\n testdir.makeini(\n \"\"\"\n [pytest]\n 
xfail_strict = %s\n \"\"\"\n % strict_val\n )\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(reason='unsupported feature')\n def test_foo():\n pass\n \"\"\"\n )\n result = testdir.runpytest(p, \"-rxX\")\n strict = strict_val == \"true\"\n result.stdout.fnmatch_lines([\"*1 failed*\" if strict else \"*1 xpassed*\"])\n assert result.ret == (1 if strict else 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFailwithSetupTeardown_TestXFailwithSetupTeardown.test_failing_teardown_issue9.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestXFailwithSetupTeardown_TestXFailwithSetupTeardown.test_failing_teardown_issue9.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 522, "end_line": 551, "span_ids": ["TestXFailwithSetupTeardown.test_failing_teardown_issue9", "TestXFailwithSetupTeardown.test_failing_setup_issue9", "TestXFailwithSetupTeardown"], "tokens": 165}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestXFailwithSetupTeardown(object):\n def test_failing_setup_issue9(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def setup_function(func):\n assert 0\n\n @pytest.mark.xfail\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 xfail*\"])\n\n def test_failing_teardown_issue9(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def teardown_function(func):\n assert 0\n\n @pytest.mark.xfail\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 xfail*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip_TestSkip.test_skip_with_reason.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip_TestSkip.test_skip_with_reason.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 554, "end_line": 619, "span_ids": ["TestSkip.test_skips_on_false_string", "TestSkip.test_skip_class", "TestSkip", "TestSkip.test_skip_with_reason", "TestSkip.test_skip_no_reason", "TestSkip.test_arg_as_reason"], "tokens": 372}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSkip(object):\n def test_skip_class(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n 
@pytest.mark.skip\n class TestSomething(object):\n def test_foo(self):\n pass\n def test_bar(self):\n pass\n\n def test_baz():\n pass\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(skipped=2, passed=1)\n\n def test_skips_on_false_string(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip('False')\n def test_foo():\n pass\n \"\"\"\n )\n rec = testdir.inline_run()\n rec.assertoutcome(skipped=1)\n\n def test_arg_as_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip('testing stuff')\n def test_bar():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*testing stuff*\", \"*1 skipped*\"])\n\n def test_skip_no_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip\n def test_foo():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*unconditional skip*\", \"*1 skipped*\"])\n\n def test_skip_with_reason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip(reason=\"for lolz\")\n def test_bar():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*for lolz*\", \"*1 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip.test_only_skips_marked_test_TestSkip.test_strict_and_skip.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkip.test_only_skips_marked_test_TestSkip.test_strict_and_skip.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 621, "end_line": 648, "span_ids": ["TestSkip.test_only_skips_marked_test", "TestSkip.test_strict_and_skip"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSkip(object):\n\n def test_only_skips_marked_test(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip\n def test_foo():\n pass\n @pytest.mark.skip(reason=\"nothing in particular\")\n def test_bar():\n pass\n def test_baz():\n assert True\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*nothing in particular*\", \"*1 passed*2 skipped*\"])\n\n def test_strict_and_skip(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skip\n def test_hello():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*unconditional skip*\", \"*1 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif_TestSkipif.test_skipif_reporting.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif_TestSkipif.test_skipif_reporting.assert_result_ret_0", 
"embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 651, "end_line": 679, "span_ids": ["TestSkipif.test_skipif_reporting", "TestSkipif.test_skipif_conditional", "TestSkipif"], "tokens": 232}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSkipif(object):\n def test_skipif_conditional(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.skipif(\"hasattr(os, 'sep')\")\n def test_func():\n pass\n \"\"\"\n )\n x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item))\n assert x.value.msg == \"condition: hasattr(os, 'sep')\"\n\n @pytest.mark.parametrize(\n \"params\", [\"\\\"hasattr(sys, 'platform')\\\"\", 'True, reason=\"invalid platform\"']\n )\n def test_skipif_reporting(self, testdir, params):\n p = testdir.makepyfile(\n test_foo=\"\"\"\n import pytest\n @pytest.mark.skipif(%(params)s)\n def test_that():\n assert 0\n \"\"\"\n % dict(params=params)\n )\n result = testdir.runpytest(p, \"-s\", \"-rs\")\n result.stdout.fnmatch_lines([\"*SKIP*1*test_foo.py*platform*\", \"*1 skipped*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif.test_skipif_using_platform_TestSkipif.test_skipif_reporting_multiple.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestSkipif.test_skipif_using_platform_TestSkipif.test_skipif_reporting_multiple.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 681, "end_line": 715, "span_ids": ["TestSkipif.test_skipif_reporting_multiple", "TestSkipif.test_skipif_using_platform"], "tokens": 277}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestSkipif(object):\n\n def test_skipif_using_platform(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.skipif(\"platform.platform() == platform.platform()\")\n def test_func():\n pass\n \"\"\"\n )\n pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item))\n\n @pytest.mark.parametrize(\n \"marker, msg1, msg2\",\n [(\"skipif\", \"SKIP\", \"skipped\"), (\"xfail\", \"XPASS\", \"xpassed\")],\n )\n def test_skipif_reporting_multiple(self, testdir, marker, msg1, msg2):\n testdir.makepyfile(\n test_foo=\"\"\"\n import pytest\n @pytest.mark.{marker}(False, reason='first_condition')\n @pytest.mark.{marker}(True, reason='second_condition')\n def test_foobar():\n assert 1\n \"\"\".format(\n marker=marker\n )\n )\n result = testdir.runpytest(\"-s\", \"-rsxX\")\n result.stdout.fnmatch_lines(\n [\n 
\"*{msg1}*test_foo.py*second_condition*\".format(msg1=msg1),\n \"*1 {msg2}*\".format(msg2=msg2),\n ]\n )\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skip_not_report_default_test_skipif_class.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skip_not_report_default_test_skipif_class.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 718, "end_line": 749, "span_ids": ["test_skipif_class", "test_skip_not_report_default"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skip_not_report_default(testdir):\n p = testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n def test_this():\n pytest.skip(\"hello\")\n \"\"\"\n )\n result = testdir.runpytest(p, \"-v\")\n result.stdout.fnmatch_lines(\n [\n # \"*HINT*use*-r*\",\n \"*1 skipped*\"\n ]\n )\n\n\ndef test_skipif_class(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n class TestClass(object):\n pytestmark = pytest.mark.skipif(\"True\")\n def test_that(self):\n assert 0\n def test_though(self):\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n result.stdout.fnmatch_lines([\"*2 skipped*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_reasons_functional_test_skipped_reasons_functional.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_reasons_functional_test_skipped_reasons_functional.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 752, "end_line": 772, "span_ids": ["test_skipped_reasons_functional"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skipped_reasons_functional(testdir):\n testdir.makepyfile(\n test_one=\"\"\"\n from conftest import doskip\n def setup_function(func):\n doskip()\n def test_func():\n pass\n class TestClass(object):\n def test_method(self):\n doskip()\n \"\"\",\n conftest=\"\"\"\n import pytest\n def doskip():\n pytest.skip('test')\n \"\"\",\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*SKIP*2*conftest.py:4: test\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": 
"\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_folding_test_skipped_folding.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_skipped_folding_test_skipped_folding.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 775, "end_line": 791, "span_ids": ["test_skipped_folding"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skipped_folding(testdir):\n testdir.makepyfile(\n test_one=\"\"\"\n import pytest\n pytestmark = pytest.mark.skip(\"Folding\")\n def setup_function(func):\n pass\n def test_func():\n pass\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines([\"*SKIP*2*test_one.py: Folding\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_test_reportchars_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_test_reportchars_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 794, "end_line": 828, "span_ids": ["test_reportchars", "test_reportchars_error"], "tokens": 214}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reportchars(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n assert 0\n @pytest.mark.xfail\n def test_2():\n assert 0\n @pytest.mark.xfail\n def test_3():\n pass\n def test_4():\n pytest.skip(\"four\")\n \"\"\"\n )\n result = testdir.runpytest(\"-rfxXs\")\n result.stdout.fnmatch_lines(\n [\"FAIL*test_1*\", \"XFAIL*test_2*\", \"XPASS*test_3*\", \"SKIP*four*\"]\n )\n\n\ndef test_reportchars_error(testdir):\n testdir.makepyfile(\n conftest=\"\"\"\n def pytest_runtest_teardown():\n assert 0\n \"\"\",\n test_simple=\"\"\"\n def test_foo():\n pass\n \"\"\",\n )\n result = testdir.runpytest(\"-rE\")\n result.stdout.fnmatch_lines([\"ERROR*test_foo*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_all_test_reportchars_all_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_reportchars_all_test_reportchars_all_error.result_stdout_fnmatch_lin", "embedding": null, 
"metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 831, "end_line": 876, "span_ids": ["test_reportchars_all_error", "test_reportchars_all"], "tokens": 256}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_reportchars_all(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n assert 0\n @pytest.mark.xfail\n def test_2():\n assert 0\n @pytest.mark.xfail\n def test_3():\n pass\n def test_4():\n pytest.skip(\"four\")\n @pytest.fixture\n def fail():\n assert 0\n def test_5(fail):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines(\n [\n \"SKIP*four*\",\n \"XFAIL*test_2*\",\n \"XPASS*test_3*\",\n \"ERROR*test_5*\",\n \"FAIL*test_1*\",\n ]\n )\n\n\ndef test_reportchars_all_error(testdir):\n testdir.makepyfile(\n conftest=\"\"\"\n def pytest_runtest_teardown():\n assert 0\n \"\"\",\n test_simple=\"\"\"\n def test_foo():\n pass\n \"\"\",\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines([\"ERROR*test_foo*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_errors_in_xfail_skip_expressions_test_errors_in_xfail_skip_expressions.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_errors_in_xfail_skip_expressions_test_errors_in_xfail_skip_expressions.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 879, "end_line": 915, "span_ids": ["test_errors_in_xfail_skip_expressions"], "tokens": 262}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_errors_in_xfail_skip_expressions(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skipif(\"asd\")\n def test_nameerror():\n pass\n @pytest.mark.xfail(\"syntax error\")\n def test_syntax():\n pass\n\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n markline = \" ^\"\n if sys.platform.startswith(\"java\"):\n # XXX report this to java\n markline = \"*\" + markline[8:]\n elif hasattr(sys, \"pypy_version_info\") and sys.pypy_version_info < (6,):\n markline = markline[5:]\n elif sys.version_info >= (3, 8) or hasattr(sys, \"pypy_version_info\"):\n markline = markline[4:]\n result.stdout.fnmatch_lines(\n [\n \"*ERROR*test_nameerror*\",\n \"*evaluating*skipif*expression*\",\n \"*asd*\",\n \"*ERROR*test_syntax*\",\n \"*evaluating*xfail*expression*\",\n \" syntax error\",\n markline,\n \"SyntaxError: invalid syntax\",\n \"*1 pass*2 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: 
{value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_skipif_with_globals_test_xfail_skipif_with_globals.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_skipif_with_globals_test_xfail_skipif_with_globals.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 918, "end_line": 932, "span_ids": ["test_xfail_skipif_with_globals"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_xfail_skipif_with_globals(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n x = 3\n @pytest.mark.skipif(\"x == 3\")\n def test_skip1():\n pass\n @pytest.mark.xfail(\"x == 3\")\n def test_boolean():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-rsx\")\n result.stdout.fnmatch_lines([\"*SKIP*x == 3*\", \"*XFAIL*test_boolean*\", \"*x == 3*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_direct_gives_error_test_default_markers.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_direct_gives_error_test_default_markers.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 935, "end_line": 955, "span_ids": ["test_direct_gives_error", "test_default_markers"], "tokens": 127}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_direct_gives_error(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skipif(True)\n def test_skip1():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 error*\"])\n\n\ndef test_default_markers(testdir):\n result = testdir.runpytest(\"--markers\")\n result.stdout.fnmatch_lines(\n [\n \"*skipif(*condition)*skip*\",\n \"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_test_setup_exception_test_xfail_test_setup_exception.assert_xpassed_not_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_test_setup_exception_test_xfail_test_setup_exception.assert_xpassed_not_in_r", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": 
"text/x-python", "category": "test", "start_line": 958, "end_line": 976, "span_ids": ["test_xfail_test_setup_exception"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_xfail_test_setup_exception(testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_runtest_setup():\n 0 / 0\n \"\"\"\n )\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail\n def test_func():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p)\n assert result.ret == 0\n assert \"xfailed\" in result.stdout.str()\n assert \"xpassed\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_imperativeskip_on_xfail_test_test_imperativeskip_on_xfail_test.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_imperativeskip_on_xfail_test_test_imperativeskip_on_xfail_test.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 979, "end_line": 1006, "span_ids": ["test_imperativeskip_on_xfail_test"], "tokens": 143}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_imperativeskip_on_xfail_test(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail\n def test_that_fails():\n assert 0\n\n @pytest.mark.skipif(\"True\")\n def test_hello():\n pass\n \"\"\"\n )\n testdir.makeconftest(\n \"\"\"\n import pytest\n def pytest_runtest_setup(item):\n pytest.skip(\"abc\")\n \"\"\"\n )\n result = testdir.runpytest(\"-rsxX\")\n result.stdout.fnmatch_lines_random(\n \"\"\"\n *SKIP*abc*\n *SKIP*condition: True*\n *2 skipped*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestBooleanCondition_TestBooleanCondition.test_xfail.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_TestBooleanCondition_TestBooleanCondition.test_xfail.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 1009, "end_line": 1061, "span_ids": ["TestBooleanCondition.test_skipif_noreason", "TestBooleanCondition.test_xfail", "TestBooleanCondition.test_skipif", "TestBooleanCondition"], "tokens": 270}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestBooleanCondition(object):\n def test_skipif(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skipif(True, reason=\"True123\")\n def test_func1():\n pass\n @pytest.mark.skipif(False, reason=\"True123\")\n def test_func2():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *1 passed*1 skipped*\n \"\"\"\n )\n\n def test_skipif_noreason(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.skipif(True)\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-rs\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *1 error*\n \"\"\"\n )\n\n def test_xfail(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.xfail(True, reason=\"True123\")\n def test_func():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-rxs\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *XFAIL*\n *True123*\n *1 xfail*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_item_test_xfail_item.assert_xfailed": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_xfail_item_test_xfail_item.assert_xfailed", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 1064, "end_line": 1083, "span_ids": ["test_xfail_item"], "tokens": 134}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_xfail_item(testdir):\n # Ensure pytest.xfail works with non-Python Item\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n class MyItem(pytest.Item):\n nodeid = 'foo'\n def runtest(self):\n pytest.xfail(\"Expected Failure\")\n\n def pytest_collect_file(path, parent):\n return MyItem(\"foo\", parent)\n \"\"\"\n )\n result = testdir.inline_run()\n passed, skipped, failed = result.listoutcomes()\n assert not failed\n xfailed = [r for r in skipped if hasattr(r, \"wasxfail\")]\n assert xfailed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_module_level_skip_error_test_invalid_skip_keyword_parameter.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_module_level_skip_error_test_invalid_skip_keyword_parameter.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 1086, "end_line": 1135, "span_ids": ["test_invalid_skip_keyword_parameter", "test_module_level_skip_with_allow_module_level", "test_module_level_skip_error"], "tokens": 275}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_module_level_skip_error(testdir):\n \"\"\"\n Verify that using pytest.skip at module level causes a collection error\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.skip\n def test_func():\n assert True\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"*Using pytest.skip outside of a test is not allowed*\"]\n )\n\n\ndef test_module_level_skip_with_allow_module_level(testdir):\n \"\"\"\n Verify that using pytest.skip(allow_module_level=True) is allowed\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n pytest.skip(\"skip_module_level\", allow_module_level=True)\n\n def test_func():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-rxs\")\n result.stdout.fnmatch_lines([\"*SKIP*skip_module_level\"])\n\n\ndef test_invalid_skip_keyword_parameter(testdir):\n \"\"\"\n Verify that using pytest.skip() with unknown parameter raises an error\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n pytest.skip(\"skip_module_level\", unknown=1)\n\n def test_func():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*TypeError:*['unknown']*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_mark_xfail_item_test_mark_xfail_item.assert_xfailed": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_mark_xfail_item_test_mark_xfail_item.assert_xfailed", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 1138, "end_line": 1160, "span_ids": ["test_mark_xfail_item"], "tokens": 157}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_mark_xfail_item(testdir):\n # Ensure pytest.mark.xfail works with non-Python Item\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n class MyItem(pytest.Item):\n nodeid = 'foo'\n def setup(self):\n marker = pytest.mark.xfail(True, reason=\"Expected failure\")\n self.add_marker(marker)\n def runtest(self):\n assert False\n\n def pytest_collect_file(path, parent):\n return MyItem(\"foo\", parent)\n \"\"\"\n )\n result = testdir.inline_run()\n passed, skipped, failed = result.listoutcomes()\n assert not failed\n xfailed = [r for r in skipped if hasattr(r, \"wasxfail\")]\n assert xfailed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_summary_list_after_errors_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_test_summary_list_after_errors_", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": 
"text/x-python", "category": "test", "start_line": 1162, "end_line": 1179, "span_ids": ["test_summary_list_after_errors"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_summary_list_after_errors(testdir):\n \"\"\"Ensure the list of errors/fails/xfails/skips appears after tracebacks in terminal reporting.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_fail():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines(\n [\n \"=* FAILURES *=\",\n \"*= short test summary info =*\",\n \"FAILED test_summary_list_after_errors.py::test_fail\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_pytest_stepwise_testdir.return.testdir": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_pytest_stepwise_testdir.return.testdir", "embedding": null, "metadata": {"file_path": "testing/test_stepwise.py", "file_name": "test_stepwise.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 52, "span_ids": ["imports", "stepwise_testdir"], "tokens": 274}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import pytest\n\n\n@pytest.fixture\ndef stepwise_testdir(testdir):\n # Rather than having to modify our testfile between tests, we introduce\n # a flag for wether or not the second test should fail.\n testdir.makeconftest(\n \"\"\"\ndef pytest_addoption(parser):\n group = parser.getgroup('general')\n group.addoption('--fail', action='store_true', dest='fail')\n group.addoption('--fail-last', action='store_true', dest='fail_last')\n\"\"\"\n )\n\n # Create a simple test suite.\n testdir.makepyfile(\n test_a=\"\"\"\ndef test_success_before_fail():\n assert 1\n\ndef test_fail_on_flag(request):\n assert not request.config.getvalue('fail')\n\ndef test_success_after_fail():\n assert 1\n\ndef test_fail_last_on_flag(request):\n assert not request.config.getvalue('fail_last')\n\ndef test_success_after_last_fail():\n assert 1\n\"\"\"\n )\n\n testdir.makepyfile(\n test_b=\"\"\"\ndef test_success():\n assert 1\n\"\"\"\n )\n\n # customize cache directory so we don't use the tox's cache directory, which makes tests in this module flaky\n testdir.makeini(\n \"\"\"\n [pytest]\n cache_dir = .cache\n \"\"\"\n )\n\n return testdir", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_error_testdir_test_run_without_stepwise.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_error_testdir_test_run_without_stepwise.None_2", "embedding": null, "metadata": {"file_path": 
"testing/test_stepwise.py", "file_name": "test_stepwise.py", "file_type": "text/x-python", "category": "test", "start_line": 55, "end_line": 83, "span_ids": ["test_run_without_stepwise", "broken_testdir", "error_testdir"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef error_testdir(testdir):\n testdir.makepyfile(\n test_a=\"\"\"\ndef test_error(nonexisting_fixture):\n assert 1\n\ndef test_success_after_fail():\n assert 1\n\"\"\"\n )\n\n return testdir\n\n\n@pytest.fixture\ndef broken_testdir(testdir):\n testdir.makepyfile(\n working_testfile=\"def test_proper(): assert 1\", broken_testfile=\"foobar\"\n )\n return testdir\n\n\ndef test_run_without_stepwise(stepwise_testdir):\n result = stepwise_testdir.runpytest(\"-v\", \"--strict\", \"--fail\")\n\n result.stdout.fnmatch_lines([\"*test_success_before_fail PASSED*\"])\n result.stdout.fnmatch_lines([\"*test_fail_on_flag FAILED*\"])\n result.stdout.fnmatch_lines([\"*test_success_after_fail PASSED*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_fail_and_continue_with_stepwise_test_fail_and_continue_with_stepwise.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_fail_and_continue_with_stepwise_test_fail_and_continue_with_stepwise.None_7", "embedding": null, "metadata": {"file_path": "testing/test_stepwise.py", "file_name": "test_stepwise.py", "file_type": "text/x-python", "category": "test", "start_line": 86, "end_line": 105, "span_ids": ["test_fail_and_continue_with_stepwise"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fail_and_continue_with_stepwise(stepwise_testdir):\n # Run the tests with a failing second test.\n result = stepwise_testdir.runpytest(\"-v\", \"--strict\", \"--stepwise\", \"--fail\")\n assert not result.stderr.str()\n\n stdout = result.stdout.str()\n # Make sure we stop after first failing test.\n assert \"test_success_before_fail PASSED\" in stdout\n assert \"test_fail_on_flag FAILED\" in stdout\n assert \"test_success_after_fail\" not in stdout\n\n # \"Fix\" the test that failed in the last run and run it again.\n result = stepwise_testdir.runpytest(\"-v\", \"--strict\", \"--stepwise\")\n assert not result.stderr.str()\n\n stdout = result.stdout.str()\n # Make sure the latest failing test runs and then continues.\n assert \"test_success_before_fail\" not in stdout\n assert \"test_fail_on_flag PASSED\" in stdout\n assert \"test_success_after_fail PASSED\" in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_run_with_skip_option_test_fail_on_errors.assert_test_success_afte": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_run_with_skip_option_test_fail_on_errors.assert_test_success_afte", "embedding": null, "metadata": {"file_path": "testing/test_stepwise.py", "file_name": "test_stepwise.py", "file_type": "text/x-python", "category": "test", "start_line": 108, "end_line": 129, "span_ids": ["test_run_with_skip_option", "test_fail_on_errors"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_run_with_skip_option(stepwise_testdir):\n result = stepwise_testdir.runpytest(\n \"-v\", \"--strict\", \"--stepwise\", \"--stepwise-skip\", \"--fail\", \"--fail-last\"\n )\n assert not result.stderr.str()\n\n stdout = result.stdout.str()\n # Make sure first fail is ignore and second fail stops the test run.\n assert \"test_fail_on_flag FAILED\" in stdout\n assert \"test_success_after_fail PASSED\" in stdout\n assert \"test_fail_last_on_flag FAILED\" in stdout\n assert \"test_success_after_last_fail\" not in stdout\n\n\ndef test_fail_on_errors(error_testdir):\n result = error_testdir.runpytest(\"-v\", \"--strict\", \"--stepwise\")\n\n assert not result.stderr.str()\n stdout = result.stdout.str()\n\n assert \"test_error ERROR\" in stdout\n assert \"test_success_after_fail\" not in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_change_testfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_stepwise.py_test_change_testfile_", "embedding": null, "metadata": {"file_path": "testing/test_stepwise.py", "file_name": "test_stepwise.py", "file_type": "text/x-python", "category": "test", "start_line": 132, "end_line": 157, "span_ids": ["test_change_testfile", "test_stop_on_collection_errors"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_change_testfile(stepwise_testdir):\n result = stepwise_testdir.runpytest(\n \"-v\", \"--strict\", \"--stepwise\", \"--fail\", \"test_a.py\"\n )\n assert not result.stderr.str()\n\n stdout = result.stdout.str()\n assert \"test_fail_on_flag FAILED\" in stdout\n\n # Make sure the second test run starts from the beginning, since the\n # test to continue from does not exist in testfile_b.\n result = stepwise_testdir.runpytest(\"-v\", \"--strict\", \"--stepwise\", \"test_b.py\")\n assert not result.stderr.str()\n\n stdout = result.stdout.str()\n assert \"test_success PASSED\" in stdout\n\n\ndef test_stop_on_collection_errors(broken_testdir):\n result = broken_testdir.runpytest(\n \"-v\", \"--strict\", \"--stepwise\", \"working_testfile.py\", \"broken_testfile.py\"\n )\n\n stdout = result.stdout.str()\n 
assert \"errors during collection\" in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_plugin_nameversion_test_plugin_nameversion.assert_result_expected": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_plugin_nameversion_test_plugin_nameversion.assert_result_expected", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 57, "end_line": 75, "span_ids": ["test_plugin_nameversion"], "tokens": 141}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"input,expected\",\n [\n ([DistInfo(project_name=\"test\", version=1)], [\"test-1\"]),\n ([DistInfo(project_name=\"pytest-test\", version=1)], [\"test-1\"]),\n (\n [\n DistInfo(project_name=\"test\", version=1),\n DistInfo(project_name=\"test\", version=1),\n ],\n [\"test-1\"],\n ),\n ],\n ids=[\"normal\", \"prefix-strip\", \"deduplicate\"],\n)\ndef test_plugin_nameversion(input, expected):\n pluginlist = [(None, x) for x in input]\n result = _plugin_nameversions(pluginlist)\n assert result == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal_TestTerminal.test_pass_skip_fail.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal_TestTerminal.test_pass_skip_fail.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 78, "end_line": 106, "span_ids": ["TestTerminal", "TestTerminal.test_pass_skip_fail"], "tokens": 208}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n def test_pass_skip_fail(self, testdir, option):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_ok():\n pass\n def test_skip():\n pytest.skip(\"xx\")\n def test_func():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(*option.args)\n if option.verbosity > 0:\n result.stdout.fnmatch_lines(\n [\n \"*test_pass_skip_fail.py::test_ok PASS*\",\n \"*test_pass_skip_fail.py::test_skip SKIP*\",\n \"*test_pass_skip_fail.py::test_func FAIL*\",\n ]\n )\n elif option.verbosity == 0:\n result.stdout.fnmatch_lines([\"*test_pass_skip_fail.py .sF*\"])\n else:\n result.stdout.fnmatch_lines([\".sF*\"])\n result.stdout.fnmatch_lines(\n [\" def test_func():\", \"> assert 0\", \"E assert 0\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_internalerror_TestTerminal.test_writeline.assert_lines_2_hello": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_internalerror_TestTerminal.test_writeline.assert_lines_2_hello", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 108, "end_line": 124, "span_ids": ["TestTerminal.test_internalerror", "TestTerminal.test_writeline"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_internalerror(self, testdir, linecomp):\n modcol = testdir.getmodulecol(\"def test_one(): pass\")\n rep = TerminalReporter(modcol.config, file=linecomp.stringio)\n with pytest.raises(ValueError) as excinfo:\n raise ValueError(\"hello\")\n rep.pytest_internalerror(excinfo.getrepr())\n linecomp.assert_contains_lines([\"INTERNALERROR> *ValueError*hello*\"])\n\n def test_writeline(self, testdir, linecomp):\n modcol = testdir.getmodulecol(\"def test_one(): pass\")\n rep = TerminalReporter(modcol.config, file=linecomp.stringio)\n rep.write_fspath_result(modcol.nodeid, \".\")\n rep.write_line(\"hello world\")\n lines = linecomp.stringio.getvalue().split(\"\\n\")\n assert not lines[0]\n assert lines[1].endswith(modcol.name + \" .\")\n assert lines[2] == \"hello world\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_show_runtest_logstart_TestTerminal.test_runtest_location_shown_before_test_starts.child_kill_15_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_show_runtest_logstart_TestTerminal.test_runtest_location_shown_before_test_starts.child_kill_15_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 126, "end_line": 147, "span_ids": ["TestTerminal.test_runtest_location_shown_before_test_starts", "TestTerminal.test_show_runtest_logstart"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_show_runtest_logstart(self, testdir, linecomp):\n item = testdir.getitem(\"def test_func(): pass\")\n tr = TerminalReporter(item.config, file=linecomp.stringio)\n item.config.pluginmanager.register(tr)\n location = item.reportinfo()\n tr.config.hook.pytest_runtest_logstart(\n nodeid=item.nodeid, location=location, fspath=str(item.fspath)\n )\n 
linecomp.assert_contains_lines([\"*test_show_runtest_logstart.py*\"])\n\n def test_runtest_location_shown_before_test_starts(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_1():\n import time\n time.sleep(20)\n \"\"\"\n )\n child = testdir.spawn_pytest(\"\")\n child.expect(\".*test_runtest_location.*py\")\n child.sendeof()\n child.kill(15)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_report_collect_after_half_a_second_TestTerminal.test_report_collect_after_half_a_second.assert_2_passed_in_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_report_collect_after_half_a_second_TestTerminal.test_report_collect_after_half_a_second.assert_2_passed_in_in_r", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 149, "end_line": 172, "span_ids": ["TestTerminal.test_report_collect_after_half_a_second"], "tokens": 186}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_report_collect_after_half_a_second(self, testdir):\n \"\"\"Test for \"collecting\" being updated after 0.5s\"\"\"\n\n testdir.makepyfile(\n **{\n \"test1.py\": \"\"\"\n import _pytest.terminal\n\n _pytest.terminal.REPORT_COLLECTING_RESOLUTION = 0\n\n def test_1():\n pass\n \"\"\",\n \"test2.py\": \"def test_2(): pass\",\n }\n )\n\n child = testdir.spawn_pytest(\"-v test1.py test2.py\")\n child.expect(r\"collecting \\.\\.\\.\")\n child.expect(r\"collecting 1 item\")\n child.expect(r\"collecting 2 items\")\n child.expect(r\"collected 2 items\")\n rest = child.read().decode(\"utf8\")\n assert \"2 passed in\" in rest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_subclasses_show_subclassed_file_TestTerminal.test_itemreport_subclasses_show_subclassed_file.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_subclasses_show_subclassed_file_TestTerminal.test_itemreport_subclasses_show_subclassed_file.None_2", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 174, "end_line": 196, "span_ids": ["TestTerminal.test_itemreport_subclasses_show_subclassed_file"], "tokens": 181}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_itemreport_subclasses_show_subclassed_file(self, testdir):\n 
testdir.makepyfile(\n test_p1=\"\"\"\n class BaseTests(object):\n def test_p1(self):\n pass\n class TestClass(BaseTests):\n pass\n \"\"\"\n )\n p2 = testdir.makepyfile(\n test_p2=\"\"\"\n from test_p1 import BaseTests\n class TestMore(BaseTests):\n pass\n \"\"\"\n )\n result = testdir.runpytest(p2)\n result.stdout.fnmatch_lines([\"*test_p2.py .*\", \"*1 passed*\"])\n result = testdir.runpytest(\"-vv\", p2)\n result.stdout.fnmatch_lines(\n [\"*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses.assert_not_in_resu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses_TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses.assert_not_in_resu", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 198, "end_line": 212, "span_ids": ["TestTerminal.test_itemreport_directclasses_not_shown_as_subclasses"], "tokens": 124}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):\n a = testdir.mkpydir(\"a123\")\n a.join(\"test_hello123.py\").write(\n textwrap.dedent(\n \"\"\"\\\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n )\n result = testdir.runpytest(\"-vv\")\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*a123/test_hello123.py*PASS*\"])\n assert \" <- \" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_interrupt_TestTerminal.test_keyboard_interrupt.None_2": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_interrupt_TestTerminal.test_keyboard_interrupt.None_2", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 214, "end_line": 243, "span_ids": ["TestTerminal.test_keyboard_interrupt"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_keyboard_interrupt(self, testdir, option):\n testdir.makepyfile(\n \"\"\"\n def test_foobar():\n assert 0\n def test_spamegg():\n import py; pytest.skip('skip me please!')\n def 
test_interrupt_me():\n raise KeyboardInterrupt # simulating the user\n \"\"\"\n )\n\n result = testdir.runpytest(*option.args, no_reraise_ctrlc=True)\n result.stdout.fnmatch_lines(\n [\n \" def test_foobar():\",\n \"> assert 0\",\n \"E assert 0\",\n \"*_keyboard_interrupt.py:6: KeyboardInterrupt*\",\n ]\n )\n if option.fulltrace:\n result.stdout.fnmatch_lines(\n [\"*raise KeyboardInterrupt # simulating the user*\"]\n )\n else:\n result.stdout.fnmatch_lines(\n [\"(to show a full traceback on KeyboardInterrupt use --fulltrace)\"]\n )\n result.stdout.fnmatch_lines([\"*KeyboardInterrupt*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_in_sessionstart_TestTerminal.test_rewrite.assert_f_getvalue_h": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminal.test_keyboard_in_sessionstart_TestTerminal.test_rewrite.assert_f_getvalue_h", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 245, "end_line": 282, "span_ids": ["TestTerminal.test_collect_single_item", "TestTerminal.test_rewrite", "TestTerminal.test_keyboard_in_sessionstart"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminal(object):\n\n def test_keyboard_in_sessionstart(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_sessionstart():\n raise KeyboardInterrupt\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_foobar():\n pass\n \"\"\"\n )\n\n result = testdir.runpytest(no_reraise_ctrlc=True)\n assert result.ret == 2\n result.stdout.fnmatch_lines([\"*KeyboardInterrupt*\"])\n\n def test_collect_single_item(self, testdir):\n \"\"\"Use singular 'item' when reporting a single test item\"\"\"\n testdir.makepyfile(\n \"\"\"\n def test_foobar():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"collected 1 item\"])\n\n def test_rewrite(self, testdir, monkeypatch):\n config = testdir.parseconfig()\n f = py.io.TextIO()\n monkeypatch.setattr(f, \"isatty\", lambda *args: True)\n tr = TerminalReporter(config, f)\n tr._tw.fullwidth = 10\n tr.write(\"hello\")\n tr.rewrite(\"hey\", erase=True)\n assert f.getvalue() == \"hello\" + \"\\r\" + \"hey\" + (6 * \" \")", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly_TestCollectonly.test_collectonly_fatal.assert_result_ret_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly_TestCollectonly.test_collectonly_fatal.assert_result_ret_3", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 285, "end_line": 334, "span_ids": ["TestCollectonly.test_collectonly_skipped_module", 
"TestCollectonly.test_collectonly_display_test_description", "TestCollectonly.test_collectonly_failed_module", "TestCollectonly", "TestCollectonly.test_collectonly_basic", "TestCollectonly.test_collectonly_fatal"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectonly(object):\n def test_collectonly_basic(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines(\n [\"\", \" \"]\n )\n\n def test_collectonly_skipped_module(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n pytest.skip(\"hello\")\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\", \"-rs\")\n result.stdout.fnmatch_lines([\"*ERROR collecting*\"])\n\n def test_collectonly_display_test_description(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_with_description():\n \\\"\"\" This test has a description.\n \\\"\"\"\n assert True\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\", \"--verbose\")\n result.stdout.fnmatch_lines([\" This test has a description.\"])\n\n def test_collectonly_failed_module(self, testdir):\n testdir.makepyfile(\"\"\"raise ValueError(0)\"\"\")\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*raise ValueError*\", \"*1 error*\"])\n\n def test_collectonly_fatal(self, testdir):\n testdir.makeconftest(\n \"\"\"\n def pytest_collectstart(collector):\n assert 0, \"urgs\"\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\")\n result.stdout.fnmatch_lines([\"*INTERNAL*args*\"])\n assert result.ret == 3", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_simple_TestCollectonly.test_collectonly_simple.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_simple_TestCollectonly.test_collectonly_simple.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 336, "end_line": 356, "span_ids": ["TestCollectonly.test_collectonly_simple"], "tokens": 132}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectonly(object):\n\n def test_collectonly_simple(self, testdir):\n p = testdir.makepyfile(\n \"\"\"\n def test_func1():\n pass\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--collect-only\", p)\n # assert stderr.startswith(\"inserting into sys.path\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\n \"*\",\n \"* \",\n \"* \",\n \"* \",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": 
"{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_error_TestCollectonly.test_collectonly_more_quiet.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestCollectonly.test_collectonly_error_TestCollectonly.test_collectonly_more_quiet.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 358, "end_line": 390, "span_ids": ["TestCollectonly.test_collectonly_more_quiet", "TestCollectonly.test_collectonly_missing_path", "TestCollectonly.test_collectonly_error", "TestCollectonly.test_collectonly_quiet"], "tokens": 294}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestCollectonly(object):\n\n def test_collectonly_error(self, testdir):\n p = testdir.makepyfile(\"import Errlkjqweqwe\")\n result = testdir.runpytest(\"--collect-only\", p)\n assert result.ret == 2\n result.stdout.fnmatch_lines(\n textwrap.dedent(\n \"\"\"\\\n *ERROR*\n *ImportError*\n *No module named *Errlk*\n *1 error*\n \"\"\"\n ).strip()\n )\n\n def test_collectonly_missing_path(self, testdir):\n \"\"\"this checks issue 115,\n failure in parseargs will cause session\n not to have the items attribute\n \"\"\"\n result = testdir.runpytest(\"--collect-only\", \"uhm_missing_path\")\n assert result.ret == 4\n result.stderr.fnmatch_lines([\"*ERROR: file not found*\"])\n\n def test_collectonly_quiet(self, testdir):\n testdir.makepyfile(\"def test_foo(): pass\")\n result = testdir.runpytest(\"--collect-only\", \"-q\")\n result.stdout.fnmatch_lines([\"*test_foo*\"])\n\n def test_collectonly_more_quiet(self, testdir):\n testdir.makepyfile(test_fun=\"def test_foo(): pass\")\n result = testdir.runpytest(\"--collect-only\", \"-qq\")\n result.stdout.fnmatch_lines([\"*test_fun.py: 1*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting_TestFixtureReporting.test_setup_fixture_error.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting_TestFixtureReporting.test_setup_fixture_error.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 393, "end_line": 414, "span_ids": ["TestFixtureReporting.test_setup_fixture_error", "TestFixtureReporting"], "tokens": 122}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureReporting(object):\n def test_setup_fixture_error(self, testdir):\n 
testdir.makepyfile(\n \"\"\"\n def setup_function(function):\n print(\"setup func\")\n assert 0\n def test_nada():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ERROR at setup of test_nada*\",\n \"*setup_function(function):*\",\n \"*setup func*\",\n \"*assert 0*\",\n \"*1 error*\",\n ]\n )\n assert result.ret != 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_TestFixtureReporting.test_teardown_fixture_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_TestFixtureReporting.test_teardown_fixture_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 416, "end_line": 436, "span_ids": ["TestFixtureReporting.test_teardown_fixture_error"], "tokens": 125}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureReporting(object):\n\n def test_teardown_fixture_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_nada():\n pass\n def teardown_function(function):\n print(\"teardown func\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ERROR at teardown*\",\n \"*teardown_function(function):*\",\n \"*assert 0*\",\n \"*Captured stdout*\",\n \"*teardown func*\",\n \"*1 passed*1 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_and_test_failure_TestFixtureReporting.test_teardown_fixture_error_and_test_failure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_teardown_fixture_error_and_test_failure_TestFixtureReporting.test_teardown_fixture_error_and_test_failure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 462, "span_ids": ["TestFixtureReporting.test_teardown_fixture_error_and_test_failure"], "tokens": 155}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureReporting(object):\n\n def test_teardown_fixture_error_and_test_failure(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_fail():\n assert 0, \"failingfunc\"\n\n def teardown_function(function):\n print(\"teardown func\")\n assert 
False\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ERROR at teardown of test_fail*\",\n \"*teardown_function(function):*\",\n \"*assert False*\",\n \"*Captured stdout*\",\n \"*teardown func*\",\n \"*test_fail*\",\n \"*def test_fail():\",\n \"*failingfunc*\",\n \"*1 failed*1 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_setup_teardown_output_and_test_failure_TestFixtureReporting.test_setup_teardown_output_and_test_failure.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestFixtureReporting.test_setup_teardown_output_and_test_failure_TestFixtureReporting.test_setup_teardown_output_and_test_failure.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 464, "end_line": 490, "span_ids": ["TestFixtureReporting.test_setup_teardown_output_and_test_failure"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestFixtureReporting(object):\n\n def test_setup_teardown_output_and_test_failure(self, testdir):\n \"\"\" Test for issue #442 \"\"\"\n testdir.makepyfile(\n \"\"\"\n def setup_function(function):\n print(\"setup func\")\n\n def test_fail():\n assert 0, \"failingfunc\"\n\n def teardown_function(function):\n print(\"teardown func\")\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*test_fail*\",\n \"*def test_fail():\",\n \"*failingfunc*\",\n \"*Captured stdout setup*\",\n \"*setup func*\",\n \"*Captured stdout teardown*\",\n \"*teardown func*\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional_TestTerminalFunctional.test_deselected.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional_TestTerminalFunctional.test_deselected.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 493, "end_line": 509, "span_ids": ["TestTerminalFunctional", "TestTerminalFunctional.test_deselected"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n def test_deselected(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n def test_one():\n pass\n def test_two():\n pass\n def test_three():\n pass\n \"\"\"\n 
)\n result = testdir.runpytest(\"-k\", \"test_two:\", testpath)\n result.stdout.fnmatch_lines(\n [\"collected 3 items / 1 deselected / 2 selected\", \"*test_deselected.py ..*\"]\n )\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_deselected_with_hookwrapper_TestTerminalFunctional.test_deselected_with_hookwrapper.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_deselected_with_hookwrapper_TestTerminalFunctional.test_deselected_with_hookwrapper.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 511, "end_line": 540, "span_ids": ["TestTerminalFunctional.test_deselected_with_hookwrapper"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_deselected_with_hookwrapper(self, testdir):\n testpath = testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.hookimpl(hookwrapper=True)\n def pytest_collection_modifyitems(config, items):\n yield\n deselected = items.pop()\n config.hook.pytest_deselected(items=[deselected])\n \"\"\"\n )\n testpath = testdir.makepyfile(\n \"\"\"\n def test_one():\n pass\n def test_two():\n pass\n def test_three():\n pass\n \"\"\"\n )\n result = testdir.runpytest(testpath)\n result.stdout.fnmatch_lines(\n [\n \"collected 3 items / 1 deselected / 2 selected\",\n \"*= 2 passed, 1 deselected in*\",\n ]\n )\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution_TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 542, "end_line": 568, "span_ids": ["TestTerminalFunctional.test_show_deselected_items_using_markexpr_before_test_execution"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def 
test_show_deselected_items_using_markexpr_before_test_execution(self, testdir):\n testdir.makepyfile(\n test_show_deselected=\"\"\"\n import pytest\n\n @pytest.mark.foo\n def test_foobar():\n pass\n\n @pytest.mark.bar\n def test_bar():\n pass\n\n def test_pass():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-m\", \"not foo\")\n result.stdout.fnmatch_lines(\n [\n \"collected 3 items / 1 deselected / 2 selected\",\n \"*test_show_deselected.py ..*\",\n \"*= 2 passed, 1 deselected in * =*\",\n ]\n )\n assert \"= 1 deselected =\" not in result.stdout.str()\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_no_skip_summary_if_failure_TestTerminalFunctional.test_passes.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_no_skip_summary_if_failure_TestTerminalFunctional.test_passes.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 570, "end_line": 602, "span_ids": ["TestTerminalFunctional.test_passes", "TestTerminalFunctional.test_no_skip_summary_if_failure"], "tokens": 203}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_no_skip_summary_if_failure(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_ok():\n pass\n def test_fail():\n assert 0\n def test_skip():\n pytest.skip(\"dontshow\")\n \"\"\"\n )\n result = testdir.runpytest()\n assert result.stdout.str().find(\"skip test summary\") == -1\n assert result.ret == 1\n\n def test_passes(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_passes():\n pass\n class TestClass(object):\n def test_method(self):\n pass\n \"\"\"\n )\n old = p1.dirpath().chdir()\n try:\n result = testdir.runpytest()\n finally:\n old.chdir()\n result.stdout.fnmatch_lines([\"test_passes.py ..*\", \"* 2 pass*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_trailer_info_TestTerminalFunctional.test_header_trailer_info.if_request_config_pluginm.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_trailer_info_TestTerminalFunctional.test_header_trailer_info.if_request_config_pluginm.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 604, "end_line": 629, "span_ids": ["TestTerminalFunctional.test_header_trailer_info"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_header_trailer_info(self, testdir, request):\n testdir.makepyfile(\n \"\"\"\n def test_passes():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n verinfo = \".\".join(map(str, sys.version_info[:3]))\n result.stdout.fnmatch_lines(\n [\n \"*===== test session starts ====*\",\n \"platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s\"\n % (\n sys.platform,\n verinfo,\n pytest.__version__,\n py.__version__,\n pluggy.__version__,\n ),\n \"*test_header_trailer_info.py .*\",\n \"=* 1 passed*in *.[0-9][0-9] seconds *=\",\n ]\n )\n if request.config.pluginmanager.list_plugin_distinfo():\n result.stdout.fnmatch_lines([\"plugins: *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_TestTerminalFunctional.test_header.None_7": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_header_TestTerminalFunctional.test_header.None_7", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 631, "end_line": 658, "span_ids": ["TestTerminalFunctional.test_header"], "tokens": 249}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_header(self, testdir, request):\n testdir.tmpdir.join(\"tests\").ensure_dir()\n testdir.tmpdir.join(\"gui\").ensure_dir()\n\n # no ini file\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"rootdir: *test_header0\"])\n\n # with inifile\n testdir.makeini(\"\"\"[pytest]\"\"\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"rootdir: *test_header0, inifile: tox.ini\"])\n\n # with testpaths option, and not passing anything in the command-line\n testdir.makeini(\n \"\"\"\n [pytest]\n testpaths = tests gui\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\"rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui\"]\n )\n\n # with testpaths option, passing directory in command-line: do not show testpaths then\n result = testdir.runpytest(\"tests\")\n result.stdout.fnmatch_lines([\"rootdir: *test_header0, inifile: tox.ini\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_showlocals_TestTerminalFunctional.verbose_testfile.return.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_showlocals_TestTerminalFunctional.verbose_testfile.return.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": 
"testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 660, "end_line": 695, "span_ids": ["TestTerminalFunctional.verbose_testfile", "TestTerminalFunctional.test_showlocals"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_showlocals(self, testdir):\n p1 = testdir.makepyfile(\n \"\"\"\n def test_showlocals():\n x = 3\n y = \"x\" * 5000\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(p1, \"-l\")\n result.stdout.fnmatch_lines(\n [\n # \"_ _ * Locals *\",\n \"x* = 3\",\n \"y* = 'xxxxxx*\",\n ]\n )\n\n @pytest.fixture\n def verbose_testfile(self, testdir):\n return testdir.makepyfile(\n \"\"\"\n import pytest\n def test_fail():\n raise ValueError()\n def test_pass():\n pass\n class TestClass(object):\n def test_skip(self):\n pytest.skip(\"hello\")\n def test_gen():\n def check(x):\n assert x == 1\n yield check, 0\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_TestTerminalFunctional.test_verbose_reporting.assert_result_ret_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_TestTerminalFunctional.test_verbose_reporting.assert_result_ret_1", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 697, "end_line": 709, "span_ids": ["TestTerminalFunctional.test_verbose_reporting"], "tokens": 126}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_verbose_reporting(self, verbose_testfile, testdir, pytestconfig):\n result = testdir.runpytest(\n verbose_testfile, \"-v\", \"-Walways::pytest.PytestWarning\"\n )\n result.stdout.fnmatch_lines(\n [\n \"*test_verbose_reporting.py::test_fail *FAIL*\",\n \"*test_verbose_reporting.py::test_pass *PASS*\",\n \"*test_verbose_reporting.py::TestClass::test_skip *SKIP*\",\n \"*test_verbose_reporting.py::test_gen *XFAIL*\",\n ]\n )\n assert result.ret == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_xdist_TestTerminalFunctional.test_verbose_reporting_xdist.assert_result_ret_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_verbose_reporting_xdist_TestTerminalFunctional.test_verbose_reporting_xdist.assert_result_ret_1", "embedding": null, "metadata": 
{"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 711, "end_line": 721, "span_ids": ["TestTerminalFunctional.test_verbose_reporting_xdist"], "tokens": 113}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_verbose_reporting_xdist(self, verbose_testfile, testdir, pytestconfig):\n if not pytestconfig.pluginmanager.get_plugin(\"xdist\"):\n pytest.skip(\"xdist plugin not installed\")\n\n result = testdir.runpytest(\n verbose_testfile, \"-v\", \"-n 1\", \"-Walways::pytest.PytestWarning\"\n )\n result.stdout.fnmatch_lines(\n [\"*FAIL*test_verbose_reporting_xdist.py::test_fail*\"]\n )\n assert result.ret == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_quiet_reporting_TestTerminalFunctional.test_more_quiet_reporting.assert_passed_not_in_s": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_quiet_reporting_TestTerminalFunctional.test_more_quiet_reporting.assert_passed_not_in_s", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 723, "end_line": 739, "span_ids": ["TestTerminalFunctional.test_more_quiet_reporting", "TestTerminalFunctional.test_quiet_reporting"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n def test_quiet_reporting(self, testdir):\n p1 = testdir.makepyfile(\"def test_pass(): pass\")\n result = testdir.runpytest(p1, \"-q\")\n s = result.stdout.str()\n assert \"test session starts\" not in s\n assert p1.basename not in s\n assert \"===\" not in s\n assert \"passed\" in s\n\n def test_more_quiet_reporting(self, testdir):\n p1 = testdir.makepyfile(\"def test_pass(): pass\")\n result = testdir.runpytest(p1, \"-qq\")\n s = result.stdout.str()\n assert \"test session starts\" not in s\n assert p1.basename not in s\n assert \"===\" not in s\n assert \"passed\" not in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_report_collectionfinish_hook_TestTerminalFunctional.test_report_collectionfinish_hook.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestTerminalFunctional.test_report_collectionfinish_hook_TestTerminalFunctional.test_report_collectionfinish_hook.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": 
"testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 741, "end_line": 760, "span_ids": ["TestTerminalFunctional.test_report_collectionfinish_hook"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTerminalFunctional(object):\n\n @pytest.mark.parametrize(\n \"params\", [(), (\"--collect-only\",)], ids=[\"no-params\", \"collect-only\"]\n )\n def test_report_collectionfinish_hook(self, testdir, params):\n testdir.makeconftest(\n \"\"\"\n def pytest_report_collectionfinish(config, startdir, items):\n return ['hello from hook: {0} items'.format(len(items))]\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(3))\n def test(i):\n pass\n \"\"\"\n )\n result = testdir.runpytest(*params)\n result.stdout.fnmatch_lines([\"collected 3 items\", \"hello from hook: 3 items\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_pass_output_reporting_test_pass_output_reporting.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_pass_output_reporting_test_pass_output_reporting.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 797, "end_line": 823, "span_ids": ["test_pass_output_reporting"], "tokens": 213}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pass_output_reporting(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_pass_has_output():\n print(\"Four score and seven years ago...\")\n def test_pass_no_output():\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n s = result.stdout.str()\n assert \"test_pass_has_output\" not in s\n assert \"Four score and seven years ago...\" not in s\n assert \"test_pass_no_output\" not in s\n result = testdir.runpytest(\"-rPp\")\n result.stdout.fnmatch_lines(\n [\n \"*= PASSES =*\",\n \"*_ test_pass_has_output _*\",\n \"*- Captured stdout call -*\",\n \"Four score and seven years ago...\",\n \"*= short test summary info =*\",\n \"PASSED test_pass_output_reporting.py::test_pass_has_output\",\n \"PASSED test_pass_output_reporting.py::test_pass_no_output\",\n \"*= 2 passed in *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_test_color_no.assert_x1b_1m_not_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_test_color_no.assert_x1b_1m_not_in_r", "embedding": null, 
"metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 826, "end_line": 837, "span_ids": ["test_color_no", "test_color_yes"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_color_yes(testdir):\n testdir.makepyfile(\"def test_this(): assert 1\")\n result = testdir.runpytest(\"--color=yes\")\n assert \"test session starts\" in result.stdout.str()\n assert \"\\x1b[1m\" in result.stdout.str()\n\n\ndef test_color_no(testdir):\n testdir.makepyfile(\"def test_this(): assert 1\")\n result = testdir.runpytest(\"--color=no\")\n assert \"test session starts\" in result.stdout.str()\n assert \"\\x1b[1m\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_collection_on_non_atty_test_color_yes_collection_on_non_atty.assert_collected_10_item": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_color_yes_collection_on_non_atty_test_color_yes_collection_on_non_atty.assert_collected_10_item", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 840, "end_line": 862, "span_ids": ["test_color_yes_collection_on_non_atty"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"verbose\", [True, False])\ndef test_color_yes_collection_on_non_atty(testdir, verbose):\n \"\"\"skip collect progress report when working on non-terminals.\n #1397\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(10))\n def test_this(i):\n assert 1\n \"\"\"\n )\n args = [\"--color=yes\"]\n if verbose:\n args.append(\"-vv\")\n result = testdir.runpytest(*args)\n assert \"test session starts\" in result.stdout.str()\n assert \"\\x1b[1m\" in result.stdout.str()\n assert \"collecting 10 items\" not in result.stdout.str()\n if verbose:\n assert \"collecting ...\" in result.stdout.str()\n assert \"collected 10 items\" in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_getreportopt_test_getreportopt.None_6": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_getreportopt_test_getreportopt.None_6", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 857, "end_line": 888, "span_ids": ["test_getreportopt.Config", 
"test_getreportopt", "test_getreportopt.Config.Option:2"], "tokens": 233}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_getreportopt():\n class Config(object):\n class Option(object):\n reportchars = \"\"\n disable_warnings = True\n\n option = Option()\n\n config = Config()\n\n config.option.reportchars = \"sf\"\n assert getreportopt(config) == \"sf\"\n\n config.option.reportchars = \"sfxw\"\n assert getreportopt(config) == \"sfx\"\n\n # Now with --disable-warnings.\n config.option.disable_warnings = False\n config.option.reportchars = \"a\"\n assert getreportopt(config) == \"sxXwEf\" # NOTE: \"w\" included!\n\n config.option.reportchars = \"sfx\"\n assert getreportopt(config) == \"sfxw\"\n\n config.option.reportchars = \"sfxw\"\n assert getreportopt(config) == \"sfxw\"\n\n config.option.reportchars = \"a\"\n assert getreportopt(config) == \"sxXwEf\" # NOTE: \"w\" included!\n\n config.option.reportchars = \"A\"\n assert getreportopt(config) == \"sxXwEfpP\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminalreporter_reportopt_addopts_test_terminalreporter_reportopt_addopts.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminalreporter_reportopt_addopts_test_terminalreporter_reportopt_addopts.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 899, "end_line": 915, "span_ids": ["test_terminalreporter_reportopt_addopts"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_terminalreporter_reportopt_addopts(testdir):\n testdir.makeini(\"[pytest]\\naddopts=-rs\")\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def tr(request):\n tr = request.config.pluginmanager.getplugin(\"terminalreporter\")\n return tr\n def test_opt(tr):\n assert tr.hasopt('skipped')\n assert not tr.hasopt('qwe')\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_short_test_traceconfig.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_short_test_traceconfig.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 918, "end_line": 945, "span_ids": 
["test_traceconfig", "test_tbstyle_short"], "tokens": 209}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tbstyle_short(testdir):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def arg(request):\n return 42\n def test_opt(arg):\n x = 0\n assert x\n \"\"\"\n )\n result = testdir.runpytest(\"--tb=short\")\n s = result.stdout.str()\n assert \"arg = 42\" not in s\n assert \"x = 0\" not in s\n result.stdout.fnmatch_lines([\"*%s:8*\" % p.basename, \" assert x\", \"E assert*\"])\n result = testdir.runpytest()\n s = result.stdout.str()\n assert \"x = 0\" in s\n assert \"assert x\" in s\n\n\ndef test_traceconfig(testdir, monkeypatch):\n result = testdir.runpytest(\"--traceconfig\")\n result.stdout.fnmatch_lines([\"*active plugins*\"])\n assert result.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting_TestGenericReporting.test_maxfailures.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting_TestGenericReporting.test_maxfailures.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 948, "end_line": 974, "span_ids": ["TestGenericReporting.test_collect_fail", "TestGenericReporting", "TestGenericReporting.test_maxfailures"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n \"\"\" this test class can be subclassed with a different option\n provider to run e.g. 
distributed tests.\n \"\"\"\n\n def test_collect_fail(self, testdir, option):\n testdir.makepyfile(\"import xyz\\n\")\n result = testdir.runpytest(*option.args)\n result.stdout.fnmatch_lines(\n [\"ImportError while importing*\", \"*No module named *xyz*\", \"*1 error*\"]\n )\n\n def test_maxfailures(self, testdir, option):\n testdir.makepyfile(\n \"\"\"\n def test_1():\n assert 0\n def test_2():\n assert 0\n def test_3():\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"--maxfail=2\", *option.args)\n result.stdout.fnmatch_lines(\n [\"*def test_1():*\", \"*def test_2():*\", \"*2 failed*\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_option_TestGenericReporting.test_tb_option.for_tbopt_in_long_sh.if_tbopt_no_.else_.assert_IndexError_not_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_option_TestGenericReporting.test_tb_option.for_tbopt_in_long_sh.if_tbopt_no_.else_.assert_IndexError_not_i", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 976, "end_line": 1001, "span_ids": ["TestGenericReporting.test_tb_option"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n\n def test_tb_option(self, testdir, option):\n testdir.makepyfile(\n \"\"\"\n import pytest\n def g():\n raise IndexError\n def test_func():\n print(6*7)\n g() # --calling--\n \"\"\"\n )\n for tbopt in [\"long\", \"short\", \"no\"]:\n print(\"testing --tb=%s...\" % tbopt)\n result = testdir.runpytest(\"--tb=%s\" % tbopt)\n s = result.stdout.str()\n if tbopt == \"long\":\n assert \"print(6*7)\" in s\n else:\n assert \"print(6*7)\" not in s\n if tbopt != \"no\":\n assert \"--calling--\" in s\n assert \"IndexError\" in s\n else:\n assert \"FAILURES\" not in s\n assert \"--calling--\" not in s\n assert \"IndexError\" not in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_crashline_TestGenericReporting.test_tb_crashline.assert_def_test_func2_n": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_tb_crashline_TestGenericReporting.test_tb_crashline.assert_def_test_func2_n", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1003, "end_line": 1022, "span_ids": ["TestGenericReporting.test_tb_crashline"], "tokens": 152}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n\n def test_tb_crashline(self, testdir, option):\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def g():\n raise IndexError\n def test_func1():\n print(6*7)\n g() # --calling--\n def test_func2():\n assert 0, \"hello\"\n \"\"\"\n )\n result = testdir.runpytest(\"--tb=line\")\n bn = p.basename\n result.stdout.fnmatch_lines(\n [\"*%s:3: IndexError*\" % bn, \"*%s:8: AssertionError: hello*\" % bn]\n )\n s = result.stdout.str()\n assert \"def test_func2\" not in s", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_pytest_report_header_TestGenericReporting.test_pytest_report_header.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_pytest_report_header_TestGenericReporting.test_pytest_report_header.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1024, "end_line": 1040, "span_ids": ["TestGenericReporting.test_pytest_report_header"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n\n def test_pytest_report_header(self, testdir, option):\n testdir.makeconftest(\n \"\"\"\n def pytest_sessionstart(session):\n session.config._somevalue = 42\n def pytest_report_header(config):\n return \"hello: %s\" % config._somevalue\n \"\"\"\n )\n testdir.mkdir(\"a\").join(\"conftest.py\").write(\n \"\"\"\ndef pytest_report_header(config, startdir):\n return [\"line1\", str(startdir)]\n\"\"\"\n )\n result = testdir.runpytest(\"a\")\n result.stdout.fnmatch_lines([\"*hello: 42*\", \"line1\", str(testdir.tmpdir)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_TestGenericReporting.test_show_capture.None_11": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_TestGenericReporting.test_show_capture.None_11", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1042, "end_line": 1091, "span_ids": ["TestGenericReporting.test_show_capture"], "tokens": 412}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n\n def test_show_capture(self, testdir):\n testdir.makepyfile(\n \"\"\"\n import 
sys\n import logging\n def test_one():\n sys.stdout.write('!This is stdout!')\n sys.stderr.write('!This is stderr!')\n logging.warning('!This is a warning log msg!')\n assert False, 'Something failed'\n \"\"\"\n )\n\n result = testdir.runpytest(\"--tb=short\")\n result.stdout.fnmatch_lines(\n [\n \"!This is stdout!\",\n \"!This is stderr!\",\n \"*WARNING*!This is a warning log msg!\",\n ]\n )\n\n result = testdir.runpytest(\"--show-capture=all\", \"--tb=short\")\n result.stdout.fnmatch_lines(\n [\n \"!This is stdout!\",\n \"!This is stderr!\",\n \"*WARNING*!This is a warning log msg!\",\n ]\n )\n\n stdout = testdir.runpytest(\"--show-capture=stdout\", \"--tb=short\").stdout.str()\n assert \"!This is stderr!\" not in stdout\n assert \"!This is stdout!\" in stdout\n assert \"!This is a warning log msg!\" not in stdout\n\n stdout = testdir.runpytest(\"--show-capture=stderr\", \"--tb=short\").stdout.str()\n assert \"!This is stdout!\" not in stdout\n assert \"!This is stderr!\" in stdout\n assert \"!This is a warning log msg!\" not in stdout\n\n stdout = testdir.runpytest(\"--show-capture=log\", \"--tb=short\").stdout.str()\n assert \"!This is stdout!\" not in stdout\n assert \"!This is stderr!\" not in stdout\n assert \"!This is a warning log msg!\" in stdout\n\n stdout = testdir.runpytest(\"--show-capture=no\", \"--tb=short\").stdout.str()\n assert \"!This is stdout!\" not in stdout\n assert \"!This is stderr!\" not in stdout\n assert \"!This is a warning log msg!\" not in stdout", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_with_teardown_logs_TestGenericReporting.test_show_capture_with_teardown_logs.None_11": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestGenericReporting.test_show_capture_with_teardown_logs_TestGenericReporting.test_show_capture_with_teardown_logs.None_11", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1093, "end_line": 1131, "span_ids": ["TestGenericReporting.test_show_capture_with_teardown_logs"], "tokens": 303}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestGenericReporting(object):\n\n def test_show_capture_with_teardown_logs(self, testdir):\n \"\"\"Ensure that the capturing of teardown logs honor --show-capture setting\"\"\"\n testdir.makepyfile(\n \"\"\"\n import logging\n import sys\n import pytest\n\n @pytest.fixture(scope=\"function\", autouse=\"True\")\n def hook_each_test(request):\n yield\n sys.stdout.write(\"!stdout!\")\n sys.stderr.write(\"!stderr!\")\n logging.warning(\"!log!\")\n\n def test_func():\n assert False\n \"\"\"\n )\n\n result = testdir.runpytest(\"--show-capture=stdout\", \"--tb=short\").stdout.str()\n assert \"!stdout!\" in result\n assert \"!stderr!\" not in result\n assert \"!log!\" not in result\n\n result = testdir.runpytest(\"--show-capture=stderr\", \"--tb=short\").stdout.str()\n assert \"!stdout!\" not in result\n assert \"!stderr!\" in result\n 
assert \"!log!\" not in result\n\n result = testdir.runpytest(\"--show-capture=log\", \"--tb=short\").stdout.str()\n assert \"!stdout!\" not in result\n assert \"!stderr!\" not in result\n assert \"!log!\" in result\n\n result = testdir.runpytest(\"--show-capture=no\", \"--tb=short\").stdout.str()\n assert \"!stdout!\" not in result\n assert \"!stderr!\" not in result\n assert \"!log!\" not in result", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fdopen_kept_alive_issue124_test_fdopen_kept_alive_issue124.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fdopen_kept_alive_issue124_test_fdopen_kept_alive_issue124.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1134, "end_line": 1150, "span_ids": ["test_fdopen_kept_alive_issue124"], "tokens": 117}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.xfail(\"not hasattr(os, 'dup')\")\ndef test_fdopen_kept_alive_issue124(testdir):\n testdir.makepyfile(\n \"\"\"\n import os, sys\n k = []\n def test_open_file_and_keep_alive(capfd):\n stdout = os.fdopen(1, 'w', 1)\n k.append(stdout)\n\n def test_close_kept_alive_file():\n stdout = k.pop()\n stdout.close()\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*2 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_native_setup_error_test_terminal_summary.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_tbstyle_native_setup_error_test_terminal_summary.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1153, "end_line": 1188, "span_ids": ["test_tbstyle_native_setup_error", "test_terminal_summary"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tbstyle_native_setup_error(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture\n def setup_error_fixture():\n raise Exception(\"error in exception\")\n\n def test_error_fixture(setup_error_fixture):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--tb=native\")\n result.stdout.fnmatch_lines(\n ['*File *test_tbstyle_native_setup_error.py\", line *, in setup_error_fixture*']\n )\n\n\ndef test_terminal_summary(testdir):\n testdir.makeconftest(\n \"\"\"\n def 
pytest_terminal_summary(terminalreporter, exitstatus):\n w = terminalreporter\n w.section(\"hello\")\n w.line(\"world\")\n w.line(\"exitstatus: {0}\".format(exitstatus))\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n \"\"\"\n *==== hello ====*\n world\n exitstatus: 5\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_are_displayed_test_terminal_summary_warnings_are_displayed.assert_stdout_count_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_are_displayed_test_terminal_summary_warnings_are_displayed.assert_stdout_count_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1191, "end_line": 1225, "span_ids": ["test_terminal_summary_warnings_are_displayed"], "tokens": 240}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_terminal_summary_warnings_are_displayed(testdir):\n \"\"\"Test that warnings emitted during pytest_terminal_summary are displayed.\n (#1305).\n \"\"\"\n testdir.makeconftest(\n \"\"\"\n import warnings\n def pytest_terminal_summary(terminalreporter):\n warnings.warn(UserWarning('internal warning'))\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n def test_failure():\n import warnings\n warnings.warn(\"warning_from_\" + \"test\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines(\n [\n \"*= warnings summary =*\",\n \"*warning_from_test*\",\n \"*= short test summary info =*\",\n \"*= warnings summary (final) =*\",\n \"*conftest.py:3:*internal warning\",\n \"*== 1 failed, 2 warnings in *\",\n ]\n )\n assert \"None\" not in result.stdout.str()\n stdout = result.stdout.str()\n assert stdout.count(\"warning_from_test\") == 1\n assert stdout.count(\"=== warnings summary \") == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_header_once_test_terminal_summary_warnings_header_once.assert_stdout_count_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_terminal_summary_warnings_header_once_test_terminal_summary_warnings_header_once.assert_stdout_count_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1228, "end_line": 1250, "span_ids": ["test_terminal_summary_warnings_header_once"], "tokens": 159}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_terminal_summary_warnings_header_once(testdir):\n testdir.makepyfile(\n \"\"\"\n def test_failure():\n import warnings\n warnings.warn(\"warning_from_\" + \"test\")\n assert 0\n \"\"\"\n )\n result = testdir.runpytest(\"-ra\")\n result.stdout.fnmatch_lines(\n [\n \"*= warnings summary =*\",\n \"*warning_from_test*\",\n \"*= short test summary info =*\",\n \"*== 1 failed, 1 warnings in *\",\n ]\n )\n assert \"None\" not in result.stdout.str()\n stdout = result.stdout.str()\n assert stdout.count(\"warning_from_test\") == 1\n assert stdout.count(\"=== warnings summary \") == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_summary_stats_test_summary_stats.assert_color_exp_color": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_summary_stats_test_summary_stats.assert_color_exp_color", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1253, "end_line": 1311, "span_ids": ["test_summary_stats"], "tokens": 824}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"exp_color, exp_line, stats_arg\",\n [\n # The method under test only cares about the length of each\n # dict value, not the actual contents, so tuples of anything\n # suffice\n # Important statuses -- the highest priority of these always wins\n (\"red\", \"1 failed\", {\"failed\": (1,)}),\n (\"red\", \"1 failed, 1 passed\", {\"failed\": (1,), \"passed\": (1,)}),\n (\"red\", \"1 error\", {\"error\": (1,)}),\n (\"red\", \"1 passed, 1 error\", {\"error\": (1,), \"passed\": (1,)}),\n # (a status that's not known to the code)\n (\"yellow\", \"1 weird\", {\"weird\": (1,)}),\n (\"yellow\", \"1 passed, 1 weird\", {\"weird\": (1,), \"passed\": (1,)}),\n (\"yellow\", \"1 warnings\", {\"warnings\": (1,)}),\n (\"yellow\", \"1 passed, 1 warnings\", {\"warnings\": (1,), \"passed\": (1,)}),\n (\"green\", \"5 passed\", {\"passed\": (1, 2, 3, 4, 5)}),\n # \"Boring\" statuses. These have no effect on the color of the summary\n # line. Thus, if *every* test has a boring status, the summary line stays\n # at its default color, i.e. 
yellow, to warn the user that the test run\n # produced no useful information\n (\"yellow\", \"1 skipped\", {\"skipped\": (1,)}),\n (\"green\", \"1 passed, 1 skipped\", {\"skipped\": (1,), \"passed\": (1,)}),\n (\"yellow\", \"1 deselected\", {\"deselected\": (1,)}),\n (\"green\", \"1 passed, 1 deselected\", {\"deselected\": (1,), \"passed\": (1,)}),\n (\"yellow\", \"1 xfailed\", {\"xfailed\": (1,)}),\n (\"green\", \"1 passed, 1 xfailed\", {\"xfailed\": (1,), \"passed\": (1,)}),\n (\"yellow\", \"1 xpassed\", {\"xpassed\": (1,)}),\n (\"green\", \"1 passed, 1 xpassed\", {\"xpassed\": (1,), \"passed\": (1,)}),\n # Likewise if no tests were found at all\n (\"yellow\", \"no tests ran\", {}),\n # Test the empty-key special case\n (\"yellow\", \"no tests ran\", {\"\": (1,)}),\n (\"green\", \"1 passed\", {\"\": (1,), \"passed\": (1,)}),\n # A couple more complex combinations\n (\n \"red\",\n \"1 failed, 2 passed, 3 xfailed\",\n {\"passed\": (1, 2), \"failed\": (1,), \"xfailed\": (1, 2, 3)},\n ),\n (\n \"green\",\n \"1 passed, 2 skipped, 3 deselected, 2 xfailed\",\n {\n \"passed\": (1,),\n \"skipped\": (1, 2),\n \"deselected\": (1, 2, 3),\n \"xfailed\": (1, 2),\n },\n ),\n ],\n)\ndef test_summary_stats(exp_line, exp_color, stats_arg):\n print(\"Based on stats: %s\" % stats_arg)\n print('Expect summary: \"{}\"; with color \"{}\"'.format(exp_line, exp_color))\n (line, color) = build_summary_stats_line(stats_arg)\n print('Actually got: \"{}\"; with color \"{}\"'.format(line, color))\n assert line == exp_line\n assert color == exp_color", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_counting_towards_summary_TestClassicOutputStyle.test_normal_verbosity.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_counting_towards_summary_TestClassicOutputStyle.test_normal_verbosity.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1314, "end_line": 1354, "span_ids": ["TestClassicOutputStyle.test_files", "test_skip_counting_towards_summary", "TestClassicOutputStyle", "TestClassicOutputStyle.test_normal_verbosity", "test_skip_counting_towards_summary.DummyReport:2", "test_skip_counting_towards_summary.DummyReport"], "tokens": 304}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skip_counting_towards_summary():\n class DummyReport(BaseReport):\n count_towards_summary = True\n\n r1 = DummyReport()\n r2 = DummyReport()\n res = build_summary_stats_line({\"failed\": (r1, r2)})\n assert res == (\"2 failed\", \"red\")\n\n r1.count_towards_summary = False\n res = build_summary_stats_line({\"failed\": (r1, r2)})\n assert res == (\"1 failed\", \"red\")\n\n\nclass TestClassicOutputStyle(object):\n \"\"\"Ensure classic output style works as expected (#3883)\"\"\"\n\n @pytest.fixture\n def test_files(self, testdir):\n testdir.makepyfile(\n **{\n \"test_one.py\": \"def test_one(): pass\",\n \"test_two.py\": \"def 
test_two(): assert 0\",\n \"sub/test_three.py\": \"\"\"\n def test_three_1(): pass\n def test_three_2(): assert 0\n def test_three_3(): pass\n \"\"\",\n }\n )\n\n def test_normal_verbosity(self, testdir, test_files):\n result = testdir.runpytest(\"-o\", \"console_output_style=classic\")\n result.stdout.fnmatch_lines(\n [\n \"test_one.py .\",\n \"test_two.py F\",\n \"sub{}test_three.py .F.\".format(os.sep),\n \"*2 failed, 3 passed in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestClassicOutputStyle.test_verbose_TestClassicOutputStyle.test_quiet.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestClassicOutputStyle.test_verbose_TestClassicOutputStyle.test_quiet.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1356, "end_line": 1371, "span_ids": ["TestClassicOutputStyle.test_quiet", "TestClassicOutputStyle.test_verbose"], "tokens": 194}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestClassicOutputStyle(object):\n\n def test_verbose(self, testdir, test_files):\n result = testdir.runpytest(\"-o\", \"console_output_style=classic\", \"-v\")\n result.stdout.fnmatch_lines(\n [\n \"test_one.py::test_one PASSED\",\n \"test_two.py::test_two FAILED\",\n \"sub{}test_three.py::test_three_1 PASSED\".format(os.sep),\n \"sub{}test_three.py::test_three_2 FAILED\".format(os.sep),\n \"sub{}test_three.py::test_three_3 PASSED\".format(os.sep),\n \"*2 failed, 3 passed in*\",\n ]\n )\n\n def test_quiet(self, testdir, test_files):\n result = testdir.runpytest(\"-o\", \"console_output_style=classic\", \"-q\")\n result.stdout.fnmatch_lines([\".F.F.\", \"*2 failed, 3 passed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle_TestProgressOutputStyle.many_tests_files.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle_TestProgressOutputStyle.many_tests_files.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1374, "end_line": 1393, "span_ids": ["TestProgressOutputStyle.many_tests_files", "TestProgressOutputStyle"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n @pytest.fixture\n def many_tests_files(self, testdir):\n 
testdir.makepyfile(\n test_bar=\"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(10))\n def test_bar(i): pass\n \"\"\",\n test_foo=\"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(5))\n def test_foo(i): pass\n \"\"\",\n test_foobar=\"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(5))\n def test_foobar(i): pass\n \"\"\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_zero_tests_collected_TestProgressOutputStyle.test_zero_tests_collected.output_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_zero_tests_collected_TestProgressOutputStyle.test_zero_tests_collected.output_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1395, "end_line": 1411, "span_ids": ["TestProgressOutputStyle.test_zero_tests_collected"], "tokens": 178}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_zero_tests_collected(self, testdir):\n \"\"\"Some plugins (testmon for example) might issue pytest_runtest_logreport without any tests being\n actually collected (#2971).\"\"\"\n testdir.makeconftest(\n \"\"\"\n def pytest_collection_modifyitems(items, config):\n from _pytest.runner import CollectReport\n for node_id in ('nodeid1', 'nodeid2'):\n rep = CollectReport(node_id, 'passed', None, None)\n rep.when = 'passed'\n rep.duration = 0.1\n config.hook.pytest_runtest_logreport(report=rep)\n \"\"\"\n )\n output = testdir.runpytest()\n assert \"ZeroDivisionError\" not in output.stdout.str()\n output.stdout.fnmatch_lines([\"=* 2 passed in *=\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_normal_TestProgressOutputStyle.test_count.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_normal_TestProgressOutputStyle.test_count.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1413, "end_line": 1437, "span_ids": ["TestProgressOutputStyle.test_normal", "TestProgressOutputStyle.test_count"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_normal(self, many_tests_files, testdir):\n output = testdir.runpytest()\n 
output.stdout.re_match_lines(\n [\n r\"test_bar.py \\.{10} \\s+ \\[ 50%\\]\",\n r\"test_foo.py \\.{5} \\s+ \\[ 75%\\]\",\n r\"test_foobar.py \\.{5} \\s+ \\[100%\\]\",\n ]\n )\n\n def test_count(self, many_tests_files, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style = count\n \"\"\"\n )\n output = testdir.runpytest()\n output.stdout.re_match_lines(\n [\n r\"test_bar.py \\.{10} \\s+ \\[10/20\\]\",\n r\"test_foo.py \\.{5} \\s+ \\[15/20\\]\",\n r\"test_foobar.py \\.{5} \\s+ \\[20/20\\]\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_TestProgressOutputStyle.test_verbose.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_TestProgressOutputStyle.test_verbose.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1439, "end_line": 1447, "span_ids": ["TestProgressOutputStyle.test_verbose"], "tokens": 121}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_verbose(self, many_tests_files, testdir):\n output = testdir.runpytest(\"-v\")\n output.stdout.re_match_lines(\n [\n r\"test_bar.py::test_bar\\[0\\] PASSED \\s+ \\[ 5%\\]\",\n r\"test_foo.py::test_foo\\[4\\] PASSED \\s+ \\[ 75%\\]\",\n r\"test_foobar.py::test_foobar\\[4\\] PASSED \\s+ \\[100%\\]\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_count_TestProgressOutputStyle.test_verbose_count.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_verbose_count_TestProgressOutputStyle.test_verbose_count.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1449, "end_line": 1463, "span_ids": ["TestProgressOutputStyle.test_verbose_count"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_verbose_count(self, many_tests_files, testdir):\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style = count\n \"\"\"\n )\n output = testdir.runpytest(\"-v\")\n output.stdout.re_match_lines(\n [\n r\"test_bar.py::test_bar\\[0\\] PASSED \\s+ \\[ 1/20\\]\",\n r\"test_foo.py::test_foo\\[4\\] PASSED \\s+ 
\\[15/20\\]\",\n r\"test_foobar.py::test_foobar\\[4\\] PASSED \\s+ \\[20/20\\]\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_normal_TestProgressOutputStyle.test_xdist_normal_count.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_normal_TestProgressOutputStyle.test_xdist_normal_count.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1465, "end_line": 1481, "span_ids": ["TestProgressOutputStyle.test_xdist_normal", "TestProgressOutputStyle.test_xdist_normal_count"], "tokens": 183}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_xdist_normal(self, many_tests_files, testdir, monkeypatch):\n pytest.importorskip(\"xdist\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n output = testdir.runpytest(\"-n2\")\n output.stdout.re_match_lines([r\"\\.{20} \\s+ \\[100%\\]\"])\n\n def test_xdist_normal_count(self, many_tests_files, testdir, monkeypatch):\n pytest.importorskip(\"xdist\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n testdir.makeini(\n \"\"\"\n [pytest]\n console_output_style = count\n \"\"\"\n )\n output = testdir.runpytest(\"-n2\")\n output.stdout.re_match_lines([r\"\\.{20} \\s+ \\[20/20\\]\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_verbose_TestProgressOutputStyle.test_capture_no.assert_not_in_output": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressOutputStyle.test_xdist_verbose_TestProgressOutputStyle.test_capture_no.assert_not_in_output", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1483, "end_line": 1502, "span_ids": ["TestProgressOutputStyle.test_xdist_verbose", "TestProgressOutputStyle.test_capture_no"], "tokens": 255}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressOutputStyle(object):\n\n def test_xdist_verbose(self, many_tests_files, testdir, monkeypatch):\n pytest.importorskip(\"xdist\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n output = testdir.runpytest(\"-n2\", \"-v\")\n output.stdout.re_match_lines_random(\n [\n r\"\\[gw\\d\\] \\[\\s*\\d+%\\] PASSED 
test_bar.py::test_bar\\[1\\]\",\n r\"\\[gw\\d\\] \\[\\s*\\d+%\\] PASSED test_foo.py::test_foo\\[1\\]\",\n r\"\\[gw\\d\\] \\[\\s*\\d+%\\] PASSED test_foobar.py::test_foobar\\[1\\]\",\n ]\n )\n\n def test_capture_no(self, many_tests_files, testdir):\n output = testdir.runpytest(\"-s\")\n output.stdout.re_match_lines(\n [r\"test_bar.py \\.{10}\", r\"test_foo.py \\.{5}\", r\"test_foobar.py \\.{5}\"]\n )\n\n output = testdir.runpytest(\"--capture=no\")\n assert \"%]\" not in output.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown_TestProgressWithTeardown.many_files.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown_TestProgressWithTeardown.many_files.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1505, "end_line": 1536, "span_ids": ["TestProgressWithTeardown.contest_with_teardown_fixture", "TestProgressWithTeardown", "TestProgressWithTeardown.many_files"], "tokens": 187}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressWithTeardown(object):\n \"\"\"Ensure we show the correct percentages for tests that fail during teardown (#3088)\"\"\"\n\n @pytest.fixture\n def contest_with_teardown_fixture(self, testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n\n @pytest.fixture\n def fail_teardown():\n yield\n assert False\n \"\"\"\n )\n\n @pytest.fixture\n def many_files(self, testdir, contest_with_teardown_fixture):\n testdir.makepyfile(\n test_bar=\"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(5))\n def test_bar(fail_teardown, i):\n pass\n \"\"\",\n test_foo=\"\"\"\n import pytest\n @pytest.mark.parametrize('i', range(15))\n def test_foo(fail_teardown, i):\n pass\n \"\"\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_simple_TestProgressWithTeardown.test_teardown_many.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_simple_TestProgressWithTeardown.test_teardown_many.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1538, "end_line": 1566, "span_ids": ["TestProgressWithTeardown.test_teardown_with_test_also_failing", "TestProgressWithTeardown.test_teardown_many", "TestProgressWithTeardown.test_teardown_simple"], "tokens": 245}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", 
"file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressWithTeardown(object):\n\n def test_teardown_simple(self, testdir, contest_with_teardown_fixture):\n testdir.makepyfile(\n \"\"\"\n def test_foo(fail_teardown):\n pass\n \"\"\"\n )\n output = testdir.runpytest()\n output.stdout.re_match_lines([r\"test_teardown_simple.py \\.E\\s+\\[100%\\]\"])\n\n def test_teardown_with_test_also_failing(\n self, testdir, contest_with_teardown_fixture\n ):\n testdir.makepyfile(\n \"\"\"\n def test_foo(fail_teardown):\n assert False\n \"\"\"\n )\n output = testdir.runpytest()\n output.stdout.re_match_lines(\n [r\"test_teardown_with_test_also_failing.py FE\\s+\\[100%\\]\"]\n )\n\n def test_teardown_many(self, testdir, many_files):\n output = testdir.runpytest()\n output.stdout.re_match_lines(\n [r\"test_bar.py (\\.E){5}\\s+\\[ 25%\\]\", r\"test_foo.py (\\.E){15}\\s+\\[100%\\]\"]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_many_verbose_TestProgressWithTeardown.test_xdist_normal.output_stdout_re_match_li": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_TestProgressWithTeardown.test_teardown_many_verbose_TestProgressWithTeardown.test_xdist_normal.output_stdout_re_match_li", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1568, "end_line": 1583, "span_ids": ["TestProgressWithTeardown.test_teardown_many_verbose", "TestProgressWithTeardown.test_xdist_normal"], "tokens": 210}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestProgressWithTeardown(object):\n\n def test_teardown_many_verbose(self, testdir, many_files):\n output = testdir.runpytest(\"-v\")\n output.stdout.re_match_lines(\n [\n r\"test_bar.py::test_bar\\[0\\] PASSED\\s+\\[ 5%\\]\",\n r\"test_bar.py::test_bar\\[0\\] ERROR\\s+\\[ 5%\\]\",\n r\"test_bar.py::test_bar\\[4\\] PASSED\\s+\\[ 25%\\]\",\n r\"test_bar.py::test_bar\\[4\\] ERROR\\s+\\[ 25%\\]\",\n ]\n )\n\n def test_xdist_normal(self, many_files, testdir, monkeypatch):\n pytest.importorskip(\"xdist\")\n monkeypatch.delenv(\"PYTEST_DISABLE_PLUGIN_AUTOLOAD\", raising=False)\n output = testdir.runpytest(\"-n2\")\n output.stdout.re_match_lines([r\"[\\.E]{40} \\s+ \\[100%\\]\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_from___future___import_ab_FakeConfig.option.return.self": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_from___future___import_ab_FakeConfig.option.return.self", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 43, "span_ids": ["FakeConfig", 
"FakeConfig.option", "FakeConfig.get", "test_tmpdir_fixture", "test_ensuretemp", "FakeConfig.trace", "imports"], "tokens": 216}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport attr\nimport six\n\nimport pytest\nfrom _pytest import pathlib\nfrom _pytest.pathlib import Path\nfrom _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG\n\n\ndef test_tmpdir_fixture(testdir):\n p = testdir.copy_example(\"tmpdir/tmpdir_fixture.py\")\n results = testdir.runpytest(p)\n results.stdout.fnmatch_lines([\"*1 passed*\"])\n\n\ndef test_ensuretemp(recwarn):\n d1 = pytest.ensuretemp(\"hello\")\n d2 = pytest.ensuretemp(\"hello\")\n assert d1 == d2\n assert d1.check(dir=1)\n\n\n@attr.s\nclass FakeConfig(object):\n basetemp = attr.ib()\n trace = attr.ib(default=None)\n\n @property\n def trace(self):\n return self\n\n def get(self, key):\n return lambda *k: None\n\n @property\n def option(self):\n return self", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestTempdirHandler_TestTempdirHandler.test_tmppath_relative_basetemp_absolute.assert_t_getbasetemp_re": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestTempdirHandler_TestTempdirHandler.test_tmppath_relative_basetemp_absolute.assert_t_getbasetemp_re", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 46, "end_line": 68, "span_ids": ["TestTempdirHandler.test_tmppath_relative_basetemp_absolute", "TestTempdirHandler", "TestTempdirHandler.test_mktemp"], "tokens": 219}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTempdirHandler(object):\n def test_mktemp(self, tmp_path):\n\n from _pytest.tmpdir import TempdirFactory, TempPathFactory\n\n config = FakeConfig(tmp_path)\n t = TempdirFactory(TempPathFactory.from_config(config))\n tmp = t.mktemp(\"world\")\n assert tmp.relto(t.getbasetemp()) == \"world0\"\n tmp = t.mktemp(\"this\")\n assert tmp.relto(t.getbasetemp()).startswith(\"this\")\n tmp2 = t.mktemp(\"this\")\n assert tmp2.relto(t.getbasetemp()).startswith(\"this\")\n assert tmp2 != tmp\n\n @pytest.mark.issue(4425)\n def test_tmppath_relative_basetemp_absolute(self, tmp_path, monkeypatch):\n from _pytest.tmpdir import TempPathFactory\n\n monkeypatch.chdir(tmp_path)\n config = FakeConfig(\"hello\")\n t = TempPathFactory.from_config(config)\n assert t.getbasetemp().resolve() == (tmp_path / \"hello\").resolve()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestConfigTmpdir_test_basetemp.assert_mytemp_join_hello": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestConfigTmpdir_test_basetemp.assert_mytemp_join_hello", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 71, "end_line": 100, "span_ids": ["test_basetemp", "TestConfigTmpdir.test_getbasetemp_custom_removes_old", "TestConfigTmpdir"], "tokens": 222}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestConfigTmpdir(object):\n def test_getbasetemp_custom_removes_old(self, testdir):\n mytemp = testdir.tmpdir.join(\"xyz\")\n p = testdir.makepyfile(\n \"\"\"\n def test_1(tmpdir):\n pass\n \"\"\"\n )\n testdir.runpytest(p, \"--basetemp=%s\" % mytemp)\n mytemp.check()\n mytemp.ensure(\"hello\")\n\n testdir.runpytest(p, \"--basetemp=%s\" % mytemp)\n mytemp.check()\n assert not mytemp.join(\"hello\").check()\n\n\ndef test_basetemp(testdir):\n mytemp = testdir.tmpdir.mkdir(\"mytemp\")\n p = testdir.makepyfile(\n \"\"\"\n import pytest\n def test_1():\n pytest.ensuretemp(\"hello\")\n \"\"\"\n )\n result = testdir.runpytest(p, \"--basetemp=%s\" % mytemp, SHOW_PYTEST_WARNINGS_ARG)\n assert result.ret == 0\n assert mytemp.join(\"hello\").check()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_always_is_realpath_test_tmpdir_always_is_realpath.assert_not_result_ret": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_always_is_realpath_test_tmpdir_always_is_realpath.assert_not_result_ret", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 103, "end_line": 121, "span_ids": ["test_tmpdir_always_is_realpath"], "tokens": 200}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tmpdir_always_is_realpath(testdir):\n # the reason why tmpdir should be a realpath is that\n # when you cd to it and do \"os.getcwd()\" you will anyway\n # get the realpath. 
Using the symlinked path can thus\n # easily result in path-inequality\n # XXX if that proves to be a problem, consider using\n # os.environ[\"PWD\"]\n realtemp = testdir.tmpdir.mkdir(\"myrealtemp\")\n linktemp = testdir.tmpdir.join(\"symlinktemp\")\n attempt_symlink_to(linktemp, str(realtemp))\n p = testdir.makepyfile(\n \"\"\"\n def test_1(tmpdir):\n import os\n assert os.path.realpath(str(tmpdir)) == str(tmpdir)\n \"\"\"\n )\n result = testdir.runpytest(\"-s\", p, \"--basetemp=%s/bt\" % linktemp)\n assert not result.ret", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmp_path_always_is_realpath_test_tmp_path_always_is_realpath.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmp_path_always_is_realpath_test_tmp_path_always_is_realpath.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 124, "end_line": 137, "span_ids": ["test_tmp_path_always_is_realpath"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tmp_path_always_is_realpath(testdir, monkeypatch):\n # for reasoning see: test_tmpdir_always_is_realpath test-case\n realtemp = testdir.tmpdir.mkdir(\"myrealtemp\")\n linktemp = testdir.tmpdir.join(\"symlinktemp\")\n attempt_symlink_to(linktemp, str(realtemp))\n monkeypatch.setenv(\"PYTEST_DEBUG_TEMPROOT\", str(linktemp))\n testdir.makepyfile(\n \"\"\"\n def test_1(tmp_path):\n assert tmp_path.resolve() == tmp_path\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_too_long_on_parametrization_test_tmpdir_factory.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_too_long_on_parametrization_test_tmpdir_factory.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 140, "end_line": 165, "span_ids": ["test_tmpdir_factory", "test_tmpdir_too_long_on_parametrization"], "tokens": 166}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tmpdir_too_long_on_parametrization(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.mark.parametrize(\"arg\", [\"1\"*1000])\n def test_some(arg, tmpdir):\n tmpdir.ensure(\"hello\")\n \"\"\"\n )\n reprec = testdir.inline_run()\n 
reprec.assertoutcome(passed=1)\n\n\ndef test_tmpdir_factory(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n @pytest.fixture(scope='session')\n def session_dir(tmpdir_factory):\n return tmpdir_factory.mktemp('data', numbered=False)\n def test_some(session_dir):\n assert session_dir.isdir()\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_tox_env_break_getuser.for_envvar_in_LOGNAME_.monkeypatch_delenv_envvar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_tox_env_break_getuser.for_envvar_in_LOGNAME_.monkeypatch_delenv_envvar", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 168, "end_line": 190, "span_ids": ["break_getuser", "test_tmpdir_fallback_tox_env"], "tokens": 182}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_tmpdir_fallback_tox_env(testdir, monkeypatch):\n \"\"\"Test that tmpdir works even if environment variables required by getpass\n module are missing (#1010).\n \"\"\"\n monkeypatch.delenv(\"USER\", raising=False)\n monkeypatch.delenv(\"USERNAME\", raising=False)\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_some(tmpdir):\n assert tmpdir.isdir()\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)\n\n\n@pytest.fixture\ndef break_getuser(monkeypatch):\n monkeypatch.setattr(\"os.getuid\", lambda: -1)\n # taken from python 2.7/3.4\n for envvar in (\"LOGNAME\", \"USER\", \"LNAME\", \"USERNAME\"):\n monkeypatch.delenv(envvar, raising=False)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_uid_not_found_test_tmpdir_fallback_uid_not_found.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_tmpdir_fallback_uid_not_found_test_tmpdir_fallback_uid_not_found.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 193, "end_line": 208, "span_ids": ["test_tmpdir_fallback_uid_not_found"], "tokens": 116}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"break_getuser\")\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"no os.getuid on windows\")\ndef test_tmpdir_fallback_uid_not_found(testdir):\n \"\"\"Test that tmpdir works even if the current process's user id 
does not\n correspond to a valid user.\n \"\"\"\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n def test_some(tmpdir):\n assert tmpdir.isdir()\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_get_user_uid_not_found_test_get_user.assert_get_user_is_None": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_test_get_user_uid_not_found_test_get_user.assert_get_user_is_None", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 211, "end_line": 233, "span_ids": ["test_get_user", "test_get_user_uid_not_found"], "tokens": 199}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.usefixtures(\"break_getuser\")\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"no os.getuid on windows\")\ndef test_get_user_uid_not_found():\n \"\"\"Test that get_user() function works even if the current process's\n user id does not correspond to a valid user (e.g. running pytest in a\n Docker container with 'docker run -u'.\n \"\"\"\n from _pytest.tmpdir import get_user\n\n assert get_user() is None\n\n\n@pytest.mark.skipif(not sys.platform.startswith(\"win\"), reason=\"win only\")\ndef test_get_user(monkeypatch):\n \"\"\"Test that get_user() function works even if environment variables\n required by getpass module are missing from the environment on Windows\n (#1010).\n \"\"\"\n from _pytest.tmpdir import get_user\n\n monkeypatch.delenv(\"USER\", raising=False)\n monkeypatch.delenv(\"USERNAME\", raising=False)\n assert get_user() is None", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestNumberedDir_TestNumberedDir.test_removal_accepts_lock.assert_folder_is_dir_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_TestNumberedDir_TestNumberedDir.test_removal_accepts_lock.assert_folder_is_dir_", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 236, "end_line": 344, "span_ids": ["TestNumberedDir.test_removal_accepts_lock", "TestNumberedDir.test_cleanup_lock_create", "TestNumberedDir.test_lock_register_cleanup_removal", "TestNumberedDir.test_make", "TestNumberedDir.test_rmtree", "TestNumberedDir.test_cleanup_keep", "TestNumberedDir.test_cleanup_locked", "TestNumberedDir.test_cleanup_ignores_symlink", "TestNumberedDir", "TestNumberedDir._do_cleanup"], "tokens": 705}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date"], "relationships": {}, "text": "class TestNumberedDir(object):\n PREFIX = \"fun-\"\n\n def test_make(self, tmp_path):\n from _pytest.pathlib import make_numbered_dir\n\n for i in range(10):\n d = make_numbered_dir(root=tmp_path, prefix=self.PREFIX)\n assert d.name.startswith(self.PREFIX)\n assert d.name.endswith(str(i))\n\n symlink = tmp_path.joinpath(self.PREFIX + \"current\")\n if symlink.exists():\n # unix\n assert symlink.is_symlink()\n assert symlink.resolve() == d.resolve()\n\n def test_cleanup_lock_create(self, tmp_path):\n d = tmp_path.joinpath(\"test\")\n d.mkdir()\n from _pytest.pathlib import create_cleanup_lock\n\n lockfile = create_cleanup_lock(d)\n with pytest.raises(EnvironmentError, match=\"cannot create lockfile in .*\"):\n create_cleanup_lock(d)\n\n lockfile.unlink()\n\n def test_lock_register_cleanup_removal(self, tmp_path):\n from _pytest.pathlib import create_cleanup_lock, register_cleanup_lock_removal\n\n lock = create_cleanup_lock(tmp_path)\n\n registry = []\n register_cleanup_lock_removal(lock, register=registry.append)\n\n cleanup_func, = registry\n\n assert lock.is_file()\n\n cleanup_func(original_pid=\"intentionally_different\")\n\n assert lock.is_file()\n\n cleanup_func()\n\n assert not lock.exists()\n\n cleanup_func()\n\n assert not lock.exists()\n\n def _do_cleanup(self, tmp_path):\n self.test_make(tmp_path)\n from _pytest.pathlib import cleanup_numbered_dir\n\n cleanup_numbered_dir(\n root=tmp_path,\n prefix=self.PREFIX,\n keep=2,\n consider_lock_dead_if_created_before=0,\n )\n\n def test_cleanup_keep(self, tmp_path):\n self._do_cleanup(tmp_path)\n a, b = (x for x in tmp_path.iterdir() if not x.is_symlink())\n print(a, b)\n\n def test_cleanup_locked(self, tmp_path):\n\n from _pytest import pathlib\n\n p = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)\n\n pathlib.create_cleanup_lock(p)\n\n assert not pathlib.ensure_deletable(\n p, consider_lock_dead_if_created_before=p.stat().st_mtime - 1\n )\n assert pathlib.ensure_deletable(\n p, consider_lock_dead_if_created_before=p.stat().st_mtime + 1\n )\n\n def test_rmtree(self, tmp_path):\n from _pytest.pathlib import rmtree\n\n adir = tmp_path / \"adir\"\n adir.mkdir()\n rmtree(adir)\n\n assert not adir.exists()\n\n adir.mkdir()\n afile = adir / \"afile\"\n afile.write_bytes(b\"aa\")\n\n rmtree(adir, force=True)\n assert not adir.exists()\n\n def test_cleanup_ignores_symlink(self, tmp_path):\n the_symlink = tmp_path / (self.PREFIX + \"current\")\n attempt_symlink_to(the_symlink, tmp_path / (self.PREFIX + \"5\"))\n self._do_cleanup(tmp_path)\n\n def test_removal_accepts_lock(self, tmp_path):\n folder = pathlib.make_numbered_dir(root=tmp_path, prefix=self.PREFIX)\n pathlib.create_cleanup_lock(folder)\n pathlib.maybe_delete_a_numbered_dir(folder)\n assert folder.is_dir()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_attempt_symlink_to_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_tmpdir.py_attempt_symlink_to_", "embedding": null, "metadata": {"file_path": "testing/test_tmpdir.py", "file_name": "test_tmpdir.py", "file_type": "text/x-python", "category": "test", "start_line": 347, "end_line": 360, "span_ids": ["attempt_symlink_to", "test_tmpdir_equals_tmp_path"], "tokens": 133}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", 
"creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def attempt_symlink_to(path, to_path):\n \"\"\"Try to make a symlink from \"path\" to \"to_path\", skipping in case this platform\n does not support it or we don't have sufficient privileges (common on Windows).\"\"\"\n if sys.platform.startswith(\"win\") and six.PY2:\n pytest.skip(\"pathlib for some reason cannot make symlinks on Python 2\")\n try:\n Path(path).symlink_to(Path(to_path))\n except OSError:\n pytest.skip(\"could not create symbolic link\")\n\n\ndef test_tmpdir_equals_tmp_path(tmpdir, tmp_path):\n assert Path(tmpdir) == tmp_path", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_from___future___import_ab_test_simple_unittest.None_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_from___future___import_ab_test_simple_unittest.None_1", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 24, "span_ids": ["imports", "test_simple_unittest"], "tokens": 145}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gc\n\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\n\n\ndef test_simple_unittest(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n def testpassing(self):\n self.assertEqual('foo', 'foo')\n def test_failing(self):\n self.assertEqual('foo', 'bar')\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n assert reprec.matchreport(\"testpassing\").passed\n assert reprec.matchreport(\"test_failing\").failed", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_runTest_method_test_isclasscheck_issue53.assert_result_ret_EXIT": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_runTest_method_test_isclasscheck_issue53.assert_result_ret_EXIT", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 27, "end_line": 62, "span_ids": ["test_runTest_method", "test_isclasscheck_issue53"], "tokens": 212}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def 
test_runTest_method(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCaseWithRunTest(unittest.TestCase):\n def runTest(self):\n self.assertEqual('foo', 'foo')\n class MyTestCaseWithoutRunTest(unittest.TestCase):\n def runTest(self):\n self.assertEqual('foo', 'foo')\n def test_something(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"-v\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *MyTestCaseWithRunTest::runTest*\n *MyTestCaseWithoutRunTest::test_something*\n *2 passed*\n \"\"\"\n )\n\n\ndef test_isclasscheck_issue53(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class _E(object):\n def __getattr__(self, tag):\n pass\n E = _E()\n \"\"\"\n )\n result = testdir.runpytest(testpath)\n assert result.ret == EXIT_NOTESTSCOLLECTED", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_test_setup.assert_rep_failed_and_42": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_test_setup.assert_rep_failed_and_42", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 65, "end_line": 85, "span_ids": ["test_setup"], "tokens": 161}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n def setUp(self):\n self.foo = 1\n def setup_method(self, method):\n self.foo2 = 1\n def test_both(self):\n self.assertEqual(1, self.foo)\n assert self.foo2 == 1\n def teardown_method(self, method):\n assert 0, \"42\"\n\n \"\"\"\n )\n reprec = testdir.inline_run(\"-s\", testpath)\n assert reprec.matchreport(\"test_both\", when=\"call\").passed\n rep = reprec.matchreport(\"test_both\", when=\"teardown\")\n assert rep.failed and \"42\" in str(rep.longrepr)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setUpModule_test_setUpModule_failing_no_teardown.assert_not_call_item_modu": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setUpModule_test_setUpModule_failing_no_teardown.assert_not_call_item_modu", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 88, "end_line": 128, "span_ids": ["test_setUpModule", "test_setUpModule_failing_no_teardown"], "tokens": 207}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setUpModule(testdir):\n testpath = testdir.makepyfile(\n 
\"\"\"\n values = []\n\n def setUpModule():\n values.append(1)\n\n def tearDownModule():\n del values[0]\n\n def test_hello():\n assert values == [1]\n\n def test_world():\n assert values == [1]\n \"\"\"\n )\n result = testdir.runpytest(testpath)\n result.stdout.fnmatch_lines([\"*2 passed*\"])\n\n\ndef test_setUpModule_failing_no_teardown(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n values = []\n\n def setUpModule():\n 0/0\n\n def tearDownModule():\n values.append(1)\n\n def test_hello():\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=0, failed=1)\n call = reprec.getcalls(\"pytest_runtest_setup\")[0]\n assert not call.item.module.values", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_new_instances_test_teardown.assert_passed_skipped_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_new_instances_test_teardown.assert_passed_skipped_", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 131, "end_line": 165, "span_ids": ["test_teardown", "test_new_instances"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_new_instances(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n def test_func1(self):\n self.x = 2\n def test_func2(self):\n assert not hasattr(self, 'x')\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=2)\n\n\ndef test_teardown(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n values = []\n def test_one(self):\n pass\n def tearDown(self):\n self.values.append(None)\n class Second(unittest.TestCase):\n def test_check(self):\n self.assertEqual(MyTestCase.values, [None])\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n passed, skipped, failed = reprec.countoutcomes()\n assert failed == 0, failed\n assert passed == 2\n assert passed + skipped + failed == 2", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_teardown_issue1649_test_teardown_issue1649.for_obj_in_gc_get_objects.assert_type_obj___name__": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_teardown_issue1649_test_teardown_issue1649.for_obj_in_gc_get_objects.assert_type_obj___name__", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 168, "end_line": 190, "span_ids": ["test_teardown_issue1649"], "tokens": 158}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_teardown_issue1649(testdir):\n \"\"\"\n Are TestCase objects cleaned up? Often unittest TestCase objects set\n attributes that are large and expensive during setUp.\n\n The TestCase will not be cleaned up if the test fails, because it\n would then exist in the stackframe.\n \"\"\"\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase):\n def setUp(self):\n self.an_expensive_object = 1\n def test_demo(self):\n pass\n\n \"\"\"\n )\n testdir.inline_run(\"-s\", testpath)\n gc.collect()\n for obj in gc.get_objects():\n assert type(obj).__name__ != \"TestCaseObjectsShouldBeCleanedUp\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue148_test_method_and_teardown_failing_reporting.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue148_test_method_and_teardown_failing_reporting.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 193, "end_line": 235, "span_ids": ["test_method_and_teardown_failing_reporting", "test_unittest_skip_issue148"], "tokens": 229}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unittest_skip_issue148(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n\n @unittest.skip(\"hello\")\n class MyTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n xxx\n def test_one(self):\n pass\n @classmethod\n def tearDownClass(self):\n xxx\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(skipped=1)\n\n\ndef test_method_and_teardown_failing_reporting(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest, pytest\n class TC(unittest.TestCase):\n def tearDown(self):\n assert 0, \"down1\"\n def test_method(self):\n assert False, \"down2\"\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n assert result.ret == 1\n result.stdout.fnmatch_lines(\n [\n \"*tearDown*\",\n \"*assert 0*\",\n \"*test_method*\",\n \"*assert False*\",\n \"*1 failed*1 error*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_failure_is_shown_test_setup_failure_is_shown.assert_never42_not_in_r": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_failure_is_shown_test_setup_failure_is_shown.assert_never42_not_in_r", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 238, "end_line": 254, "span_ids": 
["test_setup_failure_is_shown"], "tokens": 119}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_failure_is_shown(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n import pytest\n class TC(unittest.TestCase):\n def setUp(self):\n assert 0, \"down1\"\n def test_method(self):\n print(\"never42\")\n xyz\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n assert result.ret == 1\n result.stdout.fnmatch_lines([\"*setUp*\", \"*assert 0*down1*\", \"*1 failed*\"])\n assert \"never42\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_setUpClass_test_setup_setUpClass.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_setUpClass_test_setup_setUpClass.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 257, "end_line": 279, "span_ids": ["test_setup_setUpClass"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_setUpClass(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n import pytest\n class MyTestCase(unittest.TestCase):\n x = 0\n @classmethod\n def setUpClass(cls):\n cls.x += 1\n def test_func1(self):\n assert self.x == 1\n def test_func2(self):\n assert self.x == 1\n @classmethod\n def tearDownClass(cls):\n cls.x -= 1\n def test_teareddown():\n assert MyTestCase.x == 0\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_class_test_setup_class.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_setup_class_test_setup_class.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 282, "end_line": 302, "span_ids": ["test_setup_class"], "tokens": 138}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_setup_class(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n import pytest\n class MyTestCase(unittest.TestCase):\n x = 
0\n def setup_class(cls):\n cls.x += 1\n def test_func1(self):\n assert self.x == 1\n def test_func2(self):\n assert self.x == 1\n def teardown_class(cls):\n cls.x -= 1\n def test_teareddown():\n assert MyTestCase.x == 0\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=3)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_adderrorandfailure_defers_test_testcase_adderrorandfailure_defers.assert_should_not_raise_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_adderrorandfailure_defers_test_testcase_adderrorandfailure_defers.assert_should_not_raise_", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 305, "end_line": 326, "span_ids": ["test_testcase_adderrorandfailure_defers"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"type\", [\"Error\", \"Failure\"])\ndef test_testcase_adderrorandfailure_defers(testdir, type):\n testdir.makepyfile(\n \"\"\"\n from unittest import TestCase\n import pytest\n class MyTestCase(TestCase):\n def run(self, result):\n excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)\n try:\n result.add%s(self, excinfo._excinfo)\n except KeyboardInterrupt:\n raise\n except:\n pytest.fail(\"add%s should not raise\")\n def test_hello(self):\n pass\n \"\"\"\n % (type, type)\n )\n result = testdir.runpytest()\n assert \"should not raise\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_custom_exception_info_test_testcase_custom_exception_info.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_custom_exception_info_test_testcase_custom_exception_info.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 329, "end_line": 363, "span_ids": ["test_testcase_custom_exception_info"], "tokens": 230}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"type\", [\"Error\", \"Failure\"])\ndef test_testcase_custom_exception_info(testdir, type):\n testdir.makepyfile(\n \"\"\"\n from unittest import TestCase\n import py, pytest\n import _pytest._code\n class MyTestCase(TestCase):\n def run(self, result):\n excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)\n # we fake an incompatible exception 
info\n from _pytest.monkeypatch import MonkeyPatch\n mp = MonkeyPatch()\n def t(*args):\n mp.undo()\n raise TypeError()\n mp.setattr(_pytest._code, 'ExceptionInfo', t)\n try:\n excinfo = excinfo._excinfo\n result.add%(type)s(self, excinfo)\n finally:\n mp.undo()\n def test_hello(self):\n pass\n \"\"\"\n % locals()\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"NOTE: Incompatible Exception Representation*\",\n \"*ZeroDivisionError*\",\n \"*1 failed*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_totally_incompatible_exception_info_test_module_level_pytestmark.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_totally_incompatible_exception_info_test_module_level_pytestmark.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 366, "end_line": 392, "span_ids": ["test_testcase_totally_incompatible_exception_info", "test_module_level_pytestmark"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_testcase_totally_incompatible_exception_info(testdir):\n item, = testdir.getitems(\n \"\"\"\n from unittest import TestCase\n class MyTestCase(TestCase):\n def test_hello(self):\n pass\n \"\"\"\n )\n item.addError(None, 42)\n excinfo = item._excinfo.pop(0)\n assert \"ERROR: Unknown Incompatible\" in str(excinfo.getrepr())\n\n\ndef test_module_level_pytestmark(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n import pytest\n pytestmark = pytest.mark.xfail\n class MyTestCase(unittest.TestCase):\n def test_func1(self):\n assert 0\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"-s\")\n reprec.assertoutcome(skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest_TestTrialUnittest.test_trial_testcase_runtest_not_collected.None_3": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest_TestTrialUnittest.test_trial_testcase_runtest_not_collected.None_3", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 395, "end_line": 424, "span_ids": ["TestTrialUnittest.test_trial_testcase_runtest_not_collected", "TestTrialUnittest", "TestTrialUnittest.setup_class"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTrialUnittest(object):\n 
def setup_class(cls):\n cls.ut = pytest.importorskip(\"twisted.trial.unittest\")\n # on windows trial uses a socket for a reactor and apparently doesn't close it properly\n # https://twistedmatrix.com/trac/ticket/9227\n cls.ignore_unclosed_socket_warning = (\"-W\", \"always\")\n\n def test_trial_testcase_runtest_not_collected(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from twisted.trial.unittest import TestCase\n\n class TC(TestCase):\n def test_hello(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)\n reprec.assertoutcome(passed=1)\n testdir.makepyfile(\n \"\"\"\n from twisted.trial.unittest import TestCase\n\n class TC(TestCase):\n def runTest(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_exceptions_with_skips_TestTrialUnittest.test_trial_exceptions_with_skips.assert_result_ret_1_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_exceptions_with_skips_TestTrialUnittest.test_trial_exceptions_with_skips.assert_result_ret_1_i", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 426, "end_line": 479, "span_ids": ["TestTrialUnittest.test_trial_exceptions_with_skips"], "tokens": 388}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTrialUnittest(object):\n\n def test_trial_exceptions_with_skips(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n import pytest\n class TC(unittest.TestCase):\n def test_hello(self):\n pytest.skip(\"skip_in_method\")\n @pytest.mark.skipif(\"sys.version_info != 1\")\n def test_hello2(self):\n pass\n @pytest.mark.xfail(reason=\"iwanto\")\n def test_hello3(self):\n assert 0\n def test_hello4(self):\n pytest.xfail(\"i2wanto\")\n def test_trial_skip(self):\n pass\n test_trial_skip.skip = \"trialselfskip\"\n\n def test_trial_todo(self):\n assert 0\n test_trial_todo.todo = \"mytodo\"\n\n def test_trial_todo_success(self):\n pass\n test_trial_todo_success.todo = \"mytodo\"\n\n class TC2(unittest.TestCase):\n def setup_class(cls):\n pytest.skip(\"skip_in_setup_class\")\n def test_method(self):\n pass\n \"\"\"\n )\n from _pytest.compat import _is_unittest_unexpected_success_a_failure\n\n should_fail = _is_unittest_unexpected_success_a_failure()\n result = testdir.runpytest(\"-rxs\", *self.ignore_unclosed_socket_warning)\n result.stdout.fnmatch_lines_random(\n [\n \"*XFAIL*test_trial_todo*\",\n \"*trialselfskip*\",\n \"*skip_in_setup_class*\",\n \"*iwanto*\",\n \"*i2wanto*\",\n \"*sys.version_info*\",\n \"*skip_in_method*\",\n \"*1 failed*4 skipped*3 xfailed*\"\n if should_fail\n else \"*4 skipped*3 xfail*1 xpass*\",\n ]\n )\n assert result.ret == (1 if should_fail else 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_error_TestTrialUnittest.test_trial_error.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_error_TestTrialUnittest.test_trial_error.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 481, "end_line": 533, "span_ids": ["TestTrialUnittest.test_trial_error"], "tokens": 301}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTrialUnittest(object):\n\n def test_trial_error(self, testdir):\n testdir.makepyfile(\n \"\"\"\n from twisted.trial.unittest import TestCase\n from twisted.internet.defer import Deferred\n from twisted.internet import reactor\n\n class TC(TestCase):\n def test_one(self):\n crash\n\n def test_two(self):\n def f(_):\n crash\n\n d = Deferred()\n d.addCallback(f)\n reactor.callLater(0.3, d.callback, None)\n return d\n\n def test_three(self):\n def f():\n pass # will never get called\n reactor.callLater(0.3, f)\n # will crash at teardown\n\n def test_four(self):\n def f(_):\n reactor.callLater(0.3, f)\n crash\n\n d = Deferred()\n d.addCallback(f)\n reactor.callLater(0.3, d.callback, None)\n return d\n # will crash both at test time and at teardown\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*ERRORS*\",\n \"*DelayedCalls*\",\n \"*test_four*\",\n \"*NameError*crash*\",\n \"*test_one*\",\n \"*NameError*crash*\",\n \"*test_three*\",\n \"*DelayedCalls*\",\n \"*test_two*\",\n \"*crash*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_pdb_TestTrialUnittest.test_trial_testfunction_todo_property.reprec_assertoutcome_skip": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_TestTrialUnittest.test_trial_pdb_TestTrialUnittest.test_trial_testfunction_todo_property.reprec_assertoutcome_skip", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 535, "end_line": 601, "span_ids": ["TestTrialUnittest.test_trial_testfunction_skip_property", "TestTrialUnittest.test_trial_pdb", "TestTrialUnittest.test_trial_testcase_todo_property", "TestTrialUnittest.test_trial_testfunction_todo_property", "TestTrialUnittest.test_trial_testcase_skip_property"], "tokens": 444}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestTrialUnittest(object):\n\n def test_trial_pdb(self, testdir):\n p = 
testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n import pytest\n class TC(unittest.TestCase):\n def test_hello(self):\n assert 0, \"hellopdb\"\n \"\"\"\n )\n child = testdir.spawn_pytest(p)\n child.expect(\"hellopdb\")\n child.sendeof()\n\n def test_trial_testcase_skip_property(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n skip = 'dont run'\n def test_func(self):\n pass\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"-s\")\n reprec.assertoutcome(skipped=1)\n\n def test_trial_testfunction_skip_property(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n def test_func(self):\n pass\n test_func.skip = 'dont run'\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"-s\")\n reprec.assertoutcome(skipped=1)\n\n def test_trial_testcase_todo_property(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n todo = 'dont run'\n def test_func(self):\n assert 0\n \"\"\"\n )\n reprec = testdir.inline_run(testpath, \"-s\")\n reprec.assertoutcome(skipped=1)\n\n def test_trial_testfunction_todo_property(self, testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n from twisted.trial import unittest\n class MyTestCase(unittest.TestCase):\n def test_func(self):\n assert 0\n test_func.todo = 'dont run'\n \"\"\"\n )\n reprec = testdir.inline_run(\n testpath, \"-s\", *self.ignore_unclosed_socket_warning\n )\n reprec.assertoutcome(skipped=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_djangolike_testcase_test_djangolike_testcase.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_djangolike_testcase_test_djangolike_testcase.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 604, "end_line": 659, "span_ids": ["test_djangolike_testcase"], "tokens": 314}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_djangolike_testcase(testdir):\n # contributed from Morten Breekevold\n testdir.makepyfile(\n \"\"\"\n from unittest import TestCase, main\n\n class DjangoLikeTestCase(TestCase):\n\n def setUp(self):\n print(\"setUp()\")\n\n def test_presetup_has_been_run(self):\n print(\"test_thing()\")\n self.assertTrue(hasattr(self, 'was_presetup'))\n\n def tearDown(self):\n print(\"tearDown()\")\n\n def __call__(self, result=None):\n try:\n self._pre_setup()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n import sys\n result.addError(self, sys.exc_info())\n return\n super(DjangoLikeTestCase, self).__call__(result)\n try:\n self._post_teardown()\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n import sys\n result.addError(self, sys.exc_info())\n return\n\n def _pre_setup(self):\n print(\"_pre_setup()\")\n 
self.was_presetup = True\n\n def _post_teardown(self):\n print(\"_post_teardown()\")\n \"\"\"\n )\n result = testdir.runpytest(\"-s\")\n assert result.ret == 0\n result.stdout.fnmatch_lines(\n [\n \"*_pre_setup()*\",\n \"*setUp()*\",\n \"*test_thing()*\",\n \"*tearDown()*\",\n \"*_post_teardown()*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_not_shown_in_traceback_test_unittest_typerror_traceback.assert_result_ret_1": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_not_shown_in_traceback_test_unittest_typerror_traceback.assert_result_ret_1", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 662, "end_line": 707, "span_ids": ["test_unittest_not_shown_in_traceback", "test_unorderable_types", "test_unittest_typerror_traceback"], "tokens": 254}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unittest_not_shown_in_traceback(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n class t(unittest.TestCase):\n def test_hello(self):\n x = 3\n self.assertEqual(x, 4)\n \"\"\"\n )\n res = testdir.runpytest()\n assert \"failUnlessEqual\" not in res.stdout.str()\n\n\ndef test_unorderable_types(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n class TestJoinEmpty(unittest.TestCase):\n pass\n\n def make_test():\n class Test(unittest.TestCase):\n pass\n Test.__name__ = \"TestFoo\"\n return Test\n TestFoo = make_test()\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"TypeError\" not in result.stdout.str()\n assert result.ret == EXIT_NOTESTSCOLLECTED\n\n\ndef test_unittest_typerror_traceback(testdir):\n testdir.makepyfile(\n \"\"\"\n import unittest\n class TestJoinEmpty(unittest.TestCase):\n def test_hello(self, arg1):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"TypeError\" in result.stdout.str()\n assert result.ret == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_failing_test_is_xfail_test_unittest_expected_failure_for_failing_test_is_xfail.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_failing_test_is_xfail_test_unittest_expected_failure_for_failing_test_is_xfail.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 710, "end_line": 731, "span_ids": ["test_unittest_expected_failure_for_failing_test_is_xfail"], "tokens": 180}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], 
"excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"runner\", [\"pytest\", \"unittest\"])\ndef test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):\n script = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n @unittest.expectedFailure\n def test_failing_test_is_xfail(self):\n assert False\n if __name__ == '__main__':\n unittest.main()\n \"\"\"\n )\n if runner == \"pytest\":\n result = testdir.runpytest(\"-rxX\")\n result.stdout.fnmatch_lines(\n [\"*XFAIL*MyTestCase*test_failing_test_is_xfail*\", \"*1 xfailed*\"]\n )\n else:\n result = testdir.runpython(script)\n result.stderr.fnmatch_lines([\"*1 test in*\", \"*OK*(expected failures=1)*\"])\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_passing_test_is_fail_test_unittest_expected_failure_for_passing_test_is_fail.assert_result_ret_1_i": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_expected_failure_for_passing_test_is_fail_test_unittest_expected_failure_for_passing_test_is_fail.assert_result_ret_1_i", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 734, "end_line": 762, "span_ids": ["test_unittest_expected_failure_for_passing_test_is_fail"], "tokens": 223}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"runner\", [\"pytest\", \"unittest\"])\ndef test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):\n script = testdir.makepyfile(\n \"\"\"\n import unittest\n class MyTestCase(unittest.TestCase):\n @unittest.expectedFailure\n def test_passing_test_is_fail(self):\n assert True\n if __name__ == '__main__':\n unittest.main()\n \"\"\"\n )\n from _pytest.compat import _is_unittest_unexpected_success_a_failure\n\n should_fail = _is_unittest_unexpected_success_a_failure()\n if runner == \"pytest\":\n result = testdir.runpytest(\"-rxX\")\n result.stdout.fnmatch_lines(\n [\n \"*MyTestCase*test_passing_test_is_fail*\",\n \"*1 failed*\" if should_fail else \"*1 xpassed*\",\n ]\n )\n else:\n result = testdir.runpython(script)\n result.stderr.fnmatch_lines([\"*1 test in*\", \"*(unexpected successes=1)*\"])\n\n assert result.ret == (1 if should_fail else 0)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_setup_interaction_test_unittest_setup_interaction.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_setup_interaction_test_unittest_setup_interaction.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": 
"testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 765, "end_line": 797, "span_ids": ["test_unittest_setup_interaction"], "tokens": 239}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\n \"fix_type, stmt\", [(\"fixture\", \"return\"), (\"yield_fixture\", \"yield\")]\n)\ndef test_unittest_setup_interaction(testdir, fix_type, stmt):\n testdir.makepyfile(\n \"\"\"\n import unittest\n import pytest\n class MyTestCase(unittest.TestCase):\n @pytest.{fix_type}(scope=\"class\", autouse=True)\n def perclass(self, request):\n request.cls.hello = \"world\"\n {stmt}\n @pytest.{fix_type}(scope=\"function\", autouse=True)\n def perfunction(self, request):\n request.instance.funcname = request.function.__name__\n {stmt}\n\n def test_method1(self):\n assert self.funcname == \"test_method1\"\n assert self.hello == \"world\"\n\n def test_method2(self):\n assert self.funcname == \"test_method2\"\n\n def test_classattr(self):\n assert self.__class__.hello == \"world\"\n \"\"\".format(\n fix_type=fix_type, stmt=stmt\n )\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*3 passed*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_non_unittest_no_setupclass_support_test_non_unittest_no_setupclass_support.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_non_unittest_no_setupclass_support_test_non_unittest_no_setupclass_support.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 800, "end_line": 823, "span_ids": ["test_non_unittest_no_setupclass_support"], "tokens": 128}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_non_unittest_no_setupclass_support(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n class TestFoo(object):\n x = 0\n\n @classmethod\n def setUpClass(cls):\n cls.x = 1\n\n def test_method1(self):\n assert self.x == 0\n\n @classmethod\n def tearDownClass(cls):\n cls.x = 1\n\n def test_not_teareddown():\n assert TestFoo.x == 0\n\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_no_teardown_if_setupclass_failed_test_no_teardown_if_setupclass_failed.reprec_assertoutcome_pass": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_no_teardown_if_setupclass_failed_test_no_teardown_if_setupclass_failed.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 826, "end_line": 851, "span_ids": ["test_no_teardown_if_setupclass_failed"], "tokens": 139}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_no_teardown_if_setupclass_failed(testdir):\n testpath = testdir.makepyfile(\n \"\"\"\n import unittest\n\n class MyTestCase(unittest.TestCase):\n x = 0\n\n @classmethod\n def setUpClass(cls):\n cls.x = 1\n assert False\n\n def test_func1(self):\n cls.x = 10\n\n @classmethod\n def tearDownClass(cls):\n cls.x = 100\n\n def test_notTornDown():\n assert MyTestCase.x == 1\n \"\"\"\n )\n reprec = testdir.inline_run(testpath)\n reprec.assertoutcome(passed=1, failed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_issue333_result_clearing_test_issue333_result_clearing.reprec_assertoutcome_fail": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_issue333_result_clearing_test_issue333_result_clearing.reprec_assertoutcome_fail", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 854, "end_line": 874, "span_ids": ["test_issue333_result_clearing"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_issue333_result_clearing(testdir):\n testdir.makeconftest(\n \"\"\"\n import pytest\n @pytest.hookimpl(hookwrapper=True)\n def pytest_runtest_call(item):\n yield\n assert 0\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import unittest\n class TestIt(unittest.TestCase):\n def test_func(self):\n 0/0\n \"\"\"\n )\n\n reprec = testdir.inline_run()\n reprec.assertoutcome(failed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_raise_skip_issue748_test_unittest_raise_skip_issue748.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_raise_skip_issue748_test_unittest_raise_skip_issue748.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 877, "end_line": 893, "span_ids": ["test_unittest_raise_skip_issue748"], "tokens": 105}, "excluded_embed_metadata_keys": 
["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unittest_raise_skip_issue748(testdir):\n testdir.makepyfile(\n test_foo=\"\"\"\n import unittest\n\n class MyTestCase(unittest.TestCase):\n def test_one(self):\n raise unittest.SkipTest('skipping due to reasons')\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", \"-rs\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *SKIP*[1]*test_foo.py*skipping due to reasons*\n *1 skipped*\n \"\"\"\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue1169_test_class_method_containing_test_issue1558.reprec_assertoutcome_pass": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_unittest_skip_issue1169_test_class_method_containing_test_issue1558.reprec_assertoutcome_pass", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 896, "end_line": 930, "span_ids": ["test_unittest_skip_issue1169", "test_class_method_containing_test_issue1558"], "tokens": 195}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_unittest_skip_issue1169(testdir):\n testdir.makepyfile(\n test_foo=\"\"\"\n import unittest\n\n class MyTestCase(unittest.TestCase):\n @unittest.skip(\"skipping due to reasons\")\n def test_skip(self):\n self.fail()\n \"\"\"\n )\n result = testdir.runpytest(\"-v\", \"-rs\")\n result.stdout.fnmatch_lines(\n \"\"\"\n *SKIP*[1]*skipping due to reasons*\n *1 skipped*\n \"\"\"\n )\n\n\ndef test_class_method_containing_test_issue1558(testdir):\n testdir.makepyfile(\n test_foo=\"\"\"\n import unittest\n\n class MyTestCase(unittest.TestCase):\n def test_should_run(self):\n pass\n def test_should_not_run(self):\n pass\n test_should_not_run.__test__ = False\n \"\"\"\n )\n reprec = testdir.inline_run()\n reprec.assertoutcome(passed=1)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_usefixtures_marker_on_unittest_test_usefixtures_marker_on_unittest.result_assert_outcomes_pa": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_usefixtures_marker_on_unittest_test_usefixtures_marker_on_unittest.result_assert_outcomes_pa", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 933, "end_line": 995, "span_ids": ["test_usefixtures_marker_on_unittest"], "tokens": 353}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.issue(3498)\n@pytest.mark.parametrize(\n \"base\", [\"six.moves.builtins.object\", \"unittest.TestCase\", \"unittest2.TestCase\"]\n)\ndef test_usefixtures_marker_on_unittest(base, testdir):\n module = base.rsplit(\".\", 1)[0]\n pytest.importorskip(module)\n testdir.makepyfile(\n conftest=\"\"\"\n import pytest\n\n @pytest.fixture(scope='function')\n def fixture1(request, monkeypatch):\n monkeypatch.setattr(request.instance, 'fixture1', True )\n\n\n @pytest.fixture(scope='function')\n def fixture2(request, monkeypatch):\n monkeypatch.setattr(request.instance, 'fixture2', True )\n\n def node_and_marks(item):\n print(item.nodeid)\n for mark in item.iter_markers():\n print(\" \", mark)\n\n @pytest.fixture(autouse=True)\n def my_marks(request):\n node_and_marks(request.node)\n\n def pytest_collection_modifyitems(items):\n for item in items:\n node_and_marks(item)\n\n \"\"\"\n )\n\n testdir.makepyfile(\n \"\"\"\n import pytest\n import {module}\n\n class Tests({base}):\n fixture1 = False\n fixture2 = False\n\n @pytest.mark.usefixtures(\"fixture1\")\n def test_one(self):\n assert self.fixture1\n assert not self.fixture2\n\n @pytest.mark.usefixtures(\"fixture1\", \"fixture2\")\n def test_two(self):\n assert self.fixture1\n assert self.fixture2\n\n\n \"\"\".format(\n module=module, base=base\n )\n )\n\n result = testdir.runpytest(\"-s\")\n result.assert_outcomes(passed=2)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_handles_init_exceptions_test_testcase_handles_init_exceptions.assert_ERROR_at_teardown": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_testcase_handles_init_exceptions_test_testcase_handles_init_exceptions.assert_ERROR_at_teardown", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 998, "end_line": 1016, "span_ids": ["test_testcase_handles_init_exceptions"], "tokens": 148}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_testcase_handles_init_exceptions(testdir):\n \"\"\"\n Regression test to make sure exceptions in the __init__ method are bubbled up correctly.\n See https://github.com/pytest-dev/pytest/issues/3788\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n from unittest import TestCase\n import pytest\n class MyTestCase(TestCase):\n def __init__(self, *args, **kwargs):\n raise Exception(\"should raise this exception\")\n def test_hello(self):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n assert \"should raise this exception\" in result.stdout.str()\n assert \"ERROR at teardown of MyTestCase.test_hello\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, 
"__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_error_message_with_parametrized_fixtures_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_unittest.py_test_error_message_with_parametrized_fixtures_", "embedding": null, "metadata": {"file_path": "testing/test_unittest.py", "file_name": "test_unittest.py", "file_type": "text/x-python", "category": "test", "start_line": 1019, "end_line": 1044, "span_ids": ["test_error_message_with_parametrized_fixtures", "test_setup_inheritance_skipping"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_error_message_with_parametrized_fixtures(testdir):\n testdir.copy_example(\"unittest/test_parametrized_fixture_error_message.py\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*test_two does not support fixtures*\",\n \"*TestSomethingElse::test_two\",\n \"*Function type: TestCaseFunction\",\n ]\n )\n\n\n@pytest.mark.parametrize(\n \"test_name, expected_outcome\",\n [\n (\"test_setup_skip.py\", \"1 skipped\"),\n (\"test_setup_skip_class.py\", \"1 skipped\"),\n (\"test_setup_skip_module.py\", \"1 error\"),\n ],\n)\ndef test_setup_inheritance_skipping(testdir, test_name, expected_outcome):\n \"\"\"Issue #4700\"\"\"\n testdir.copy_example(\"unittest/{}\".format(test_name))\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"* {} in *\".format(expected_outcome)])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py__coding_utf8__pyfile_with_warnings.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py__coding_utf8__pyfile_with_warnings.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 39, "span_ids": ["imports", "pyfile_with_warnings", "docstring", "impl"], "tokens": 196}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "# -*- coding: utf8 -*-\nfrom __future__ import unicode_literals\n\nimport sys\nimport warnings\n\nimport six\n\nimport pytest\n\nWARNINGS_SUMMARY_HEADER = \"warnings summary\"\n\n\n@pytest.fixture\ndef pyfile_with_warnings(testdir, request):\n \"\"\"\n Create a test file which calls a function in a module which generates warnings.\n \"\"\"\n testdir.syspathinsert()\n test_name = request.function.__name__\n module_name = test_name.lstrip(\"test_\") + \"_module\"\n testdir.makepyfile(\n **{\n module_name: \"\"\"\n import warnings\n def foo():\n warnings.warn(UserWarning(\"user warning\"))\n warnings.warn(RuntimeWarning(\"runtime warning\"))\n return 1\n \"\"\",\n test_name: \"\"\"\n import {module_name}\n def test_func():\n assert {module_name}.foo() == 1\n 
\"\"\".format(\n module_name=module_name\n ),\n }\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_normal_flow_test_normal_flow.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_normal_flow_test_normal_flow.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 42, "end_line": 58, "span_ids": ["test_normal_flow"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\ndef test_normal_flow(testdir, pyfile_with_warnings):\n \"\"\"\n Check that the warnings section is displayed.\n \"\"\"\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"test_normal_flow.py::test_func\",\n \"*normal_flow_module.py:3: UserWarning: user warning\",\n '* warnings.warn(UserWarning(\"user warning\"))',\n \"*normal_flow_module.py:4: RuntimeWarning: runtime warning\",\n '* warnings.warn(RuntimeWarning(\"runtime warning\"))',\n \"* 1 passed, 2 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_setup_teardown_warnings_test_setup_teardown_warnings.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_setup_teardown_warnings_test_setup_teardown_warnings.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 61, "end_line": 88, "span_ids": ["test_setup_teardown_warnings"], "tokens": 190}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"always\")\ndef test_setup_teardown_warnings(testdir, pyfile_with_warnings):\n testdir.makepyfile(\n \"\"\"\n import warnings\n import pytest\n\n @pytest.fixture\n def fix():\n warnings.warn(UserWarning(\"warning during setup\"))\n yield\n warnings.warn(UserWarning(\"warning during teardown\"))\n\n def test_func(fix):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_setup_teardown_warnings.py:6: UserWarning: warning during setup\",\n '*warnings.warn(UserWarning(\"warning during setup\"))',\n \"*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown\",\n '*warnings.warn(UserWarning(\"warning during teardown\"))',\n \"* 1 passed, 2 warnings*\",\n ]\n 
)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_as_errors_test_as_errors.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_as_errors_test_as_errors.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 91, "end_line": 110, "span_ids": ["test_as_errors"], "tokens": 149}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"cmdline\", \"ini\"])\ndef test_as_errors(testdir, pyfile_with_warnings, method):\n args = (\"-W\", \"error\") if method == \"cmdline\" else ()\n if method == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings=error\n \"\"\"\n )\n # Use a subprocess, since changing logging level affects other threads\n # (xdist).\n result = testdir.runpytest_subprocess(*args)\n result.stdout.fnmatch_lines(\n [\n \"E UserWarning: user warning\",\n \"as_errors_module.py:3: UserWarning\",\n \"* 1 failed in *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_ignore_test_ignore.assert_WARNINGS_SUMMARY_H": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_ignore_test_ignore.assert_WARNINGS_SUMMARY_H", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 113, "end_line": 126, "span_ids": ["test_ignore"], "tokens": 110}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"method\", [\"cmdline\", \"ini\"])\ndef test_ignore(testdir, pyfile_with_warnings, method):\n args = (\"-W\", \"ignore\") if method == \"cmdline\" else ()\n if method == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings= ignore\n \"\"\"\n )\n\n result = testdir.runpytest(*args)\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_unicode_test_unicode.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_unicode_test_unicode.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": 
"test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 129, "end_line": 157, "span_ids": ["test_unicode"], "tokens": 174}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.version_info < (3, 0), reason=\"warnings message is unicode is ok in python3\"\n)\n@pytest.mark.filterwarnings(\"always\")\ndef test_unicode(testdir, pyfile_with_warnings):\n testdir.makepyfile(\n \"\"\"\n # -*- coding: utf8 -*-\n import warnings\n import pytest\n\n\n @pytest.fixture\n def fix():\n warnings.warn(u\"\u6d4b\u8bd5\")\n yield\n\n def test_func(fix):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_unicode.py:8: UserWarning: \\u6d4b\\u8bd5*\",\n \"* 1 passed, 1 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_test_py2_unicode.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_test_py2_unicode.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 160, "end_line": 195, "span_ids": ["test_py2_unicode"], "tokens": 271}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(\n sys.version_info >= (3, 0),\n reason=\"warnings message is broken as it is not str instance\",\n)\ndef test_py2_unicode(testdir, pyfile_with_warnings):\n if getattr(sys, \"pypy_version_info\", ())[:2] == (5, 9) and sys.platform.startswith(\n \"win\"\n ):\n pytest.xfail(\"fails with unicode error on PyPy2 5.9 and Windows (#2905)\")\n testdir.makepyfile(\n \"\"\"\n # -*- coding: utf8 -*-\n import warnings\n import pytest\n\n\n @pytest.fixture\n def fix():\n warnings.warn(u\"\u6d4b\u8bd5\")\n yield\n\n @pytest.mark.filterwarnings('always')\n def test_func(fix):\n pass\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_py2_unicode.py:8: UserWarning: \\\\u6d4b\\\\u8bd5\",\n '*warnings.warn(u\"\\u6d4b\\u8bd5\")',\n \"*warnings.py:*: UnicodeWarning: Warning is using unicode non*\",\n \"* 1 passed, 2 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_ascii_test_py2_unicode_ascii.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_py2_unicode_ascii_test_py2_unicode_ascii.result_stdout_fnmatch_lin", 
"embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 198, "end_line": 219, "span_ids": ["test_py2_unicode_ascii"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_py2_unicode_ascii(testdir):\n \"\"\"Ensure that our warning about 'unicode warnings containing non-ascii messages'\n does not trigger with ascii-convertible messages\"\"\"\n testdir.makeini(\"[pytest]\")\n testdir.makepyfile(\n \"\"\"\n import pytest\n import warnings\n\n @pytest.mark.filterwarnings('always')\n def test_func():\n warnings.warn(u\"hello\")\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n '*warnings.warn(u\"hello\")',\n \"* 1 passed, 1 warnings in*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_works_with_filterwarnings_test_works_with_filterwarnings.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_works_with_filterwarnings_test_works_with_filterwarnings.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 222, "end_line": 243, "span_ids": ["test_works_with_filterwarnings"], "tokens": 129}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_works_with_filterwarnings(testdir):\n \"\"\"Ensure our warnings capture does not mess with pre-installed filters (#2430).\"\"\"\n testdir.makepyfile(\n \"\"\"\n import warnings\n\n class MyWarning(Warning):\n pass\n\n warnings.filterwarnings(\"error\", category=MyWarning)\n\n class TestWarnings(object):\n def test_my_warning(self):\n try:\n warnings.warn(MyWarning(\"warn!\"))\n assert False\n except MyWarning:\n assert True\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*== 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_filterwarnings_mark_test_filterwarnings_mark.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_filterwarnings_mark_test_filterwarnings_mark.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 246, "end_line": 276, "span_ids": ["test_filterwarnings_mark"], "tokens": 199}, 
"excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"default_config\", [\"ini\", \"cmdline\"])\ndef test_filterwarnings_mark(testdir, default_config):\n \"\"\"\n Test ``filterwarnings`` mark works and takes precedence over command line and ini options.\n \"\"\"\n if default_config == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings = always\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import warnings\n import pytest\n\n @pytest.mark.filterwarnings('ignore::RuntimeWarning')\n def test_ignore_runtime_warning():\n warnings.warn(RuntimeWarning())\n\n @pytest.mark.filterwarnings('error')\n def test_warning_error():\n warnings.warn(RuntimeWarning())\n\n def test_show_warning():\n warnings.warn(RuntimeWarning())\n \"\"\"\n )\n result = testdir.runpytest(\"-W always\" if default_config == \"cmdline\" else \"\")\n result.stdout.fnmatch_lines([\"*= 1 failed, 2 passed, 1 warnings in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_non_string_warning_argument_test_filterwarnings_mark_registration.assert_result_ret_0": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_non_string_warning_argument_test_filterwarnings_mark_registration.assert_result_ret_0", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 279, "end_line": 306, "span_ids": ["test_non_string_warning_argument", "test_filterwarnings_mark_registration"], "tokens": 162}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_non_string_warning_argument(testdir):\n \"\"\"Non-str argument passed to warning breaks pytest (#2956)\"\"\"\n testdir.makepyfile(\n \"\"\"\n import warnings\n import pytest\n\n def test():\n warnings.warn(UserWarning(1, u'foo'))\n \"\"\"\n )\n result = testdir.runpytest(\"-W\", \"always\")\n result.stdout.fnmatch_lines([\"*= 1 passed, 1 warnings in *\"])\n\n\ndef test_filterwarnings_mark_registration(testdir):\n \"\"\"Ensure filterwarnings mark is registered\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n @pytest.mark.filterwarnings('error')\n def test_func():\n pass\n \"\"\"\n )\n result = testdir.runpytest(\"--strict\")\n assert result.ret == 0", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warning_captured_hook_test_warning_captured_hook.assert_collected_expec": {"__data__": {"id_": 
"/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warning_captured_hook_test_warning_captured_hook.assert_collected_expec", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 309, "end_line": 353, "span_ids": ["test_warning_captured_hook.WarningCollector", "test_warning_captured_hook.WarningCollector.pytest_warning_captured", "test_warning_captured_hook"], "tokens": 302}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"always\")\ndef test_warning_captured_hook(testdir):\n testdir.makeconftest(\n \"\"\"\n from _pytest.warnings import _issue_warning_captured\n def pytest_configure(config):\n _issue_warning_captured(UserWarning(\"config warning\"), config.hook, stacklevel=2)\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest, warnings\n\n warnings.warn(UserWarning(\"collect warning\"))\n\n @pytest.fixture\n def fix():\n warnings.warn(UserWarning(\"setup warning\"))\n yield 1\n warnings.warn(UserWarning(\"teardown warning\"))\n\n def test_func(fix):\n warnings.warn(UserWarning(\"call warning\"))\n assert fix == 1\n \"\"\"\n )\n\n collected = []\n\n class WarningCollector:\n def pytest_warning_captured(self, warning_message, when, item):\n imge_name = item.name if item is not None else \"\"\n collected.append((str(warning_message.message), when, imge_name))\n\n result = testdir.runpytest(plugins=[WarningCollector()])\n result.stdout.fnmatch_lines([\"*1 passed*\"])\n\n expected = [\n (\"config warning\", \"config\", \"\"),\n (\"collect warning\", \"collect\", \"\"),\n (\"setup warning\", \"runtest\", \"test_func\"),\n (\"call warning\", \"runtest\", \"test_func\"),\n (\"teardown warning\", \"runtest\", \"test_func\"),\n ]\n assert collected == expected", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_collection_warnings_test_collection_warnings.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_collection_warnings_test_collection_warnings.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 356, "end_line": 379, "span_ids": ["test_collection_warnings"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"always\")\ndef test_collection_warnings(testdir):\n \"\"\"\n Check that we also capture warnings issued during test collection (#3251).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n import warnings\n\n warnings.warn(UserWarning(\"collection warning\"))\n\n def test_foo():\n pass\n \"\"\"\n )\n result = 
testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \" *collection_warnings.py:3: UserWarning: collection warning\",\n ' warnings.warn(UserWarning(\"collection warning\"))',\n \"* 1 passed, 1 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_mark_regex_escape_test_mark_regex_escape.assert_WARNINGS_SUMMARY_H": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_mark_regex_escape_test_mark_regex_escape.assert_WARNINGS_SUMMARY_H", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 382, "end_line": 395, "span_ids": ["test_mark_regex_escape"], "tokens": 107}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"always\")\ndef test_mark_regex_escape(testdir):\n \"\"\"@pytest.mark.filterwarnings should not try to escape regex characters (#3936)\"\"\"\n testdir.makepyfile(\n r\"\"\"\n import pytest, warnings\n\n @pytest.mark.filterwarnings(r\"ignore:some \\(warning\\)\")\n def test_foo():\n warnings.warn(UserWarning(\"some (warning)\"))\n \"\"\"\n )\n result = testdir.runpytest()\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_hide_pytest_internal_warnings_test_hide_pytest_internal_warnings.None_1.else_.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_hide_pytest_internal_warnings_test_hide_pytest_internal_warnings.None_1.else_.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 398, "end_line": 435, "span_ids": ["test_hide_pytest_internal_warnings"], "tokens": 265}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.filterwarnings(\"default\")\n@pytest.mark.parametrize(\"ignore_pytest_warnings\", [\"no\", \"ini\", \"cmdline\"])\ndef test_hide_pytest_internal_warnings(testdir, ignore_pytest_warnings):\n \"\"\"Make sure we can ignore internal pytest warnings using a warnings filter.\"\"\"\n testdir.makepyfile(\n \"\"\"\n import pytest\n import warnings\n\n warnings.warn(pytest.PytestWarning(\"some internal warning\"))\n\n def test_bar():\n pass\n \"\"\"\n )\n if ignore_pytest_warnings == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings = ignore::pytest.PytestWarning\n \"\"\"\n )\n args = (\n 
[\"-W\", \"ignore::pytest.PytestWarning\"]\n if ignore_pytest_warnings == \"cmdline\"\n else []\n )\n result = testdir.runpytest(*args)\n if ignore_pytest_warnings != \"no\":\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()\n else:\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning\",\n \"* 1 passed, 1 warnings *\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_cmdline_over_ini_test_option_precedence_cmdline_over_ini.if_ignore_on_cmdline_.else_.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_cmdline_over_ini_test_option_precedence_cmdline_over_ini.if_ignore_on_cmdline_.else_.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 438, "end_line": 459, "span_ids": ["test_option_precedence_cmdline_over_ini"], "tokens": 169}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"ignore_on_cmdline\", [True, False])\ndef test_option_precedence_cmdline_over_ini(testdir, ignore_on_cmdline):\n \"\"\"filters defined in the command-line should take precedence over filters in ini files (#3946).\"\"\"\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings = error\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import warnings\n def test():\n warnings.warn(UserWarning('hello'))\n \"\"\"\n )\n args = [\"-W\", \"ignore\"] if ignore_on_cmdline else []\n result = testdir.runpytest(*args)\n if ignore_on_cmdline:\n result.stdout.fnmatch_lines([\"* 1 passed in*\"])\n else:\n result.stdout.fnmatch_lines([\"* 1 failed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_mark_test_option_precedence_mark.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_option_precedence_mark_test_option_precedence_mark.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 462, "end_line": 479, "span_ids": ["test_option_precedence_mark"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_option_precedence_mark(testdir):\n \"\"\"Filters defined by marks should always take precedence (#3946).\"\"\"\n testdir.makeini(\n 
\"\"\"\n [pytest]\n filterwarnings = ignore\n \"\"\"\n )\n testdir.makepyfile(\n \"\"\"\n import pytest, warnings\n @pytest.mark.filterwarnings('error')\n def test():\n warnings.warn(UserWarning('hello'))\n \"\"\"\n )\n result = testdir.runpytest(\"-W\", \"ignore\")\n result.stdout.fnmatch_lines([\"* 1 failed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault_TestDeprecationWarningsByDefault.create_file.testdir_makepyfile_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault_TestDeprecationWarningsByDefault.create_file.testdir_makepyfile_", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 482, "end_line": 501, "span_ids": ["TestDeprecationWarningsByDefault", "TestDeprecationWarningsByDefault.create_file"], "tokens": 114}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecationWarningsByDefault:\n \"\"\"\n Note: all pytest runs are executed in a subprocess so we don't inherit warning filters\n from pytest's own test suite\n \"\"\"\n\n def create_file(self, testdir, mark=\"\"):\n testdir.makepyfile(\n \"\"\"\n import pytest, warnings\n\n warnings.warn(DeprecationWarning(\"collection\"))\n\n {mark}\n def test_foo():\n warnings.warn(PendingDeprecationWarning(\"test run\"))\n \"\"\".format(\n mark=mark\n )\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_shown_by_default_TestDeprecationWarningsByDefault.test_shown_by_default.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_shown_by_default_TestDeprecationWarningsByDefault.test_shown_by_default.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 503, "end_line": 523, "span_ids": ["TestDeprecationWarningsByDefault.test_shown_by_default"], "tokens": 184}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecationWarningsByDefault:\n\n @pytest.mark.parametrize(\"customize_filters\", [True, False])\n def test_shown_by_default(self, testdir, customize_filters):\n \"\"\"Show deprecation warnings by default, even if user has customized the warnings filters (#4013).\"\"\"\n self.create_file(testdir)\n if customize_filters:\n testdir.makeini(\n \"\"\"\n [pytest]\n 
filterwarnings =\n once::UserWarning\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_shown_by_default.py:3: DeprecationWarning: collection\",\n \"*test_shown_by_default.py:7: PendingDeprecationWarning: test run\",\n \"* 1 passed, 2 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_ini_TestDeprecationWarningsByDefault.test_hidden_by_mark.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_ini_TestDeprecationWarningsByDefault.test_hidden_by_mark.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 525, "end_line": 553, "span_ids": ["TestDeprecationWarningsByDefault.test_hidden_by_mark", "TestDeprecationWarningsByDefault.test_hidden_by_ini"], "tokens": 218}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecationWarningsByDefault:\n\n def test_hidden_by_ini(self, testdir):\n self.create_file(testdir)\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings =\n ignore::DeprecationWarning\n ignore::PendingDeprecationWarning\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()\n\n def test_hidden_by_mark(self, testdir):\n \"\"\"Should hide the deprecation warning from the function, but the warning during collection should\n be displayed normally.\n \"\"\"\n self.create_file(\n testdir,\n mark='@pytest.mark.filterwarnings(\"ignore::PendingDeprecationWarning\")',\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines(\n [\n \"*== %s ==*\" % WARNINGS_SUMMARY_HEADER,\n \"*test_hidden_by_mark.py:3: DeprecationWarning: collection\",\n \"* 1 passed, 1 warnings*\",\n ]\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_cmdline_TestDeprecationWarningsByDefault.test_hidden_by_system.assert_WARNINGS_SUMMARY_H": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestDeprecationWarningsByDefault.test_hidden_by_cmdline_TestDeprecationWarningsByDefault.test_hidden_by_system.assert_WARNINGS_SUMMARY_H", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 555, "end_line": 569, "span_ids": ["TestDeprecationWarningsByDefault.test_hidden_by_system", "TestDeprecationWarningsByDefault.test_hidden_by_cmdline"], "tokens": 142}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestDeprecationWarningsByDefault:\n\n def test_hidden_by_cmdline(self, testdir):\n self.create_file(testdir)\n result = testdir.runpytest_subprocess(\n \"-W\",\n \"ignore::DeprecationWarning\",\n \"-W\",\n \"ignore::PendingDeprecationWarning\",\n )\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()\n\n def test_hidden_by_system(self, testdir, monkeypatch):\n self.create_file(testdir)\n monkeypatch.setenv(str(\"PYTHONWARNINGS\"), str(\"once::UserWarning\"))\n result = testdir.runpytest_subprocess()\n assert WARNINGS_SUMMARY_HEADER not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_infinite_loop_warning_against_unicode_usage_py2_test_infinite_loop_warning_against_unicode_usage_py2.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_infinite_loop_warning_against_unicode_usage_py2_test_infinite_loop_warning_against_unicode_usage_py2.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 572, "end_line": 596, "span_ids": ["test_infinite_loop_warning_against_unicode_usage_py2"], "tokens": 192}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.skipif(six.PY3, reason=\"Python 2 only issue\")\ndef test_infinite_loop_warning_against_unicode_usage_py2(testdir):\n \"\"\"\n We need to be careful when raising the warning about unicode usage with \"warnings.warn\"\n because it might be overwritten by users and this itself causes another warning (#3691).\n \"\"\"\n testdir.makepyfile(\n \"\"\"\n # -*- coding: utf8 -*-\n from __future__ import unicode_literals\n import warnings\n import pytest\n\n def _custom_showwarning(message, *a, **b):\n return \"WARNING: {}\".format(message)\n\n warnings.formatwarning = _custom_showwarning\n\n @pytest.mark.filterwarnings(\"default\")\n def test_custom_warning_formatter():\n warnings.warn(\"\u00a5\")\n \"\"\"\n )\n result = testdir.runpytest_subprocess()\n result.stdout.fnmatch_lines([\"*1 passed, * warnings in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_removed_in_pytest4_warning_as_error_test_removed_in_pytest4_warning_as_error.if_change_default_is_None.else_.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_removed_in_pytest4_warning_as_error_test_removed_in_pytest4_warning_as_error.if_change_default_is_None.else_.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": 
"test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 599, "end_line": 627, "span_ids": ["test_removed_in_pytest4_warning_as_error"], "tokens": 205}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.mark.parametrize(\"change_default\", [None, \"ini\", \"cmdline\"])\ndef test_removed_in_pytest4_warning_as_error(testdir, change_default):\n testdir.makepyfile(\n \"\"\"\n import warnings, pytest\n def test():\n warnings.warn(pytest.RemovedInPytest4Warning(\"some warning\"))\n \"\"\"\n )\n if change_default == \"ini\":\n testdir.makeini(\n \"\"\"\n [pytest]\n filterwarnings =\n ignore::pytest.RemovedInPytest4Warning\n \"\"\"\n )\n\n args = (\n (\"-Wignore::pytest.RemovedInPytest4Warning\",)\n if change_default == \"cmdline\"\n else ()\n )\n result = testdir.runpytest(*args)\n if change_default is None:\n result.stdout.fnmatch_lines([\"* 1 failed in *\"])\n else:\n assert change_default in (\"ini\", \"cmdline\")\n result.stdout.fnmatch_lines([\"* 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestAssertionWarnings_TestAssertionWarnings.test_false_function_no_warn.result_stdout_fnmatch_lin": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_TestAssertionWarnings_TestAssertionWarnings.test_false_function_no_warn.result_stdout_fnmatch_lin", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 630, "end_line": 687, "span_ids": ["TestAssertionWarnings.assert_result_warns", "TestAssertionWarnings.test_false_function_no_warn", "TestAssertionWarnings", "TestAssertionWarnings.test_none_function_warns", "TestAssertionWarnings.test_assert_is_none_no_warn", "TestAssertionWarnings.test_tuple_warning", "TestAssertionWarnings.create_file"], "tokens": 345}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TestAssertionWarnings:\n @staticmethod\n def assert_result_warns(result, msg):\n result.stdout.fnmatch_lines([\"*PytestAssertRewriteWarning: %s*\" % msg])\n\n def test_tuple_warning(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def test_foo():\n assert (1,2)\n \"\"\"\n )\n result = testdir.runpytest()\n self.assert_result_warns(\n result, \"assertion is always true, perhaps remove parentheses?\"\n )\n\n @staticmethod\n def create_file(testdir, return_none):\n testdir.makepyfile(\n \"\"\"\n def foo(return_none):\n if return_none:\n return None\n else:\n return False\n\n def test_foo():\n assert foo({return_none})\n \"\"\".format(\n return_none=return_none\n )\n )\n\n def test_none_function_warns(self, testdir):\n self.create_file(testdir, True)\n result = testdir.runpytest()\n self.assert_result_warns(\n result, 'asserting the value None, 
please use \"assert is None\"'\n )\n\n def test_assert_is_none_no_warn(self, testdir):\n testdir.makepyfile(\n \"\"\"\n def foo():\n return None\n\n def test_foo():\n assert foo() is None\n \"\"\"\n )\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 passed in*\"])\n\n def test_false_function_no_warn(self, testdir):\n self.create_file(testdir, False)\n result = testdir.runpytest()\n result.stdout.fnmatch_lines([\"*1 failed in*\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warnings_checker_twice_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_warnings.py_test_warnings_checker_twice_", "embedding": null, "metadata": {"file_path": "testing/test_warnings.py", "file_name": "test_warnings.py", "file_type": "text/x-python", "category": "test", "start_line": 690, "end_line": 716, "span_ids": ["test_group_warnings_by_message", "test_warnings_checker_twice"], "tokens": 225}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_warnings_checker_twice():\n \"\"\"Issue #4617\"\"\"\n expectation = pytest.warns(UserWarning)\n with expectation:\n warnings.warn(\"Message A\", UserWarning)\n with expectation:\n warnings.warn(\"Message B\", UserWarning)\n\n\n@pytest.mark.filterwarnings(\"always\")\ndef test_group_warnings_by_message(testdir):\n testdir.copy_example(\"warnings/test_group_warnings_by_message.py\")\n result = testdir.runpytest()\n result.stdout.fnmatch_lines(\n [\n \"test_group_warnings_by_message.py::test_foo[0]\",\n \"test_group_warnings_by_message.py::test_foo[1]\",\n \"test_group_warnings_by_message.py::test_foo[2]\",\n \"test_group_warnings_by_message.py::test_foo[3]\",\n \"test_group_warnings_by_message.py::test_foo[4]\",\n \"test_group_warnings_by_message.py::test_bar\",\n ]\n )\n warning_code = 'warnings.warn(UserWarning(\"foo\"))'\n assert warning_code in result.stdout.str()\n assert result.stdout.str().count(warning_code) == 1", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_six_TestFailing.test_not.assert_not_f_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/doc/en/example/assertion/failure_demo.py_six_TestFailing.test_not.assert_not_f_", "embedding": null, "metadata": {"file_path": "doc/en/example/assertion/failure_demo.py", "file_name": "failure_demo.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 42, "span_ids": ["TestFailing", "TestFailing.test_simple", "test_generative", "TestFailing.test_simple_multiline", "otherfunc", "imports", "TestFailing.test_not", "somefunc", "otherfunc_multi"], "tokens": 173}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", 
"last_modified_date", "last_accessed_date"], "relationships": {}, "text": "import six\n\nimport _pytest._code\nimport pytest\nfrom pytest import raises\n\n\ndef otherfunc(a, b):\n assert a == b\n\n\ndef somefunc(x, y):\n otherfunc(x, y)\n\n\ndef otherfunc_multi(a, b):\n assert a == b\n\n\n@pytest.mark.parametrize(\"param1, param2\", [(3, 6)])\ndef test_generative(param1, param2):\n assert param1 * 2 < param2\n\n\nclass TestFailing(object):\n def test_simple(self):\n def f():\n return 42\n\n def g():\n return 43\n\n assert f() == g()\n\n def test_simple_multiline(self):\n otherfunc_multi(42, 6 * 9)\n\n def test_not(self):\n def f():\n return 42\n\n assert not f()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_xml_attribute_record_xml_attribute.return.attr_func": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/junitxml.py_record_xml_attribute_record_xml_attribute.return.attr_func", "embedding": null, "metadata": {"file_path": "src/_pytest/junitxml.py", "file_name": "junitxml.py", "file_type": "text/x-python", "category": "implementation", "start_line": 320, "end_line": 345, "span_ids": ["record_xml_attribute"], "tokens": 172}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "@pytest.fixture\ndef record_xml_attribute(request):\n \"\"\"Add extra xml attributes to the tag for the calling test.\n The fixture is callable with ``(name, value)``, with value being\n automatically xml-encoded\n \"\"\"\n from _pytest.warning_types import PytestExperimentalApiWarning\n\n request.node.warn(\n PytestExperimentalApiWarning(\"record_xml_attribute is an experimental feature\")\n )\n\n _warn_incompatibility_with_xunit2(request, \"record_xml_attribute\")\n\n # Declare noop\n def add_attr_noop(name, value):\n pass\n\n attr_func = add_attr_noop\n\n xml = getattr(request.config, \"_xml\", None)\n if xml is not None:\n node_reporter = xml.node_reporter(request.node.nodeid)\n attr_func = node_reporter.add_attribute\n\n return attr_func", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption_pytest_addoption.None_13": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption_pytest_addoption.None_13", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 36, "end_line": 141, "span_ids": ["pytest_addoption"], "tokens": 756}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n parser.addini(\n 
\"norecursedirs\",\n \"directory patterns to avoid for recursion\",\n type=\"args\",\n default=[\".*\", \"build\", \"dist\", \"CVS\", \"_darcs\", \"{arch}\", \"*.egg\", \"venv\"],\n )\n parser.addini(\n \"testpaths\",\n \"directories to search for tests when no files or directories are given in the \"\n \"command line.\",\n type=\"args\",\n default=[],\n )\n # parser.addini(\"dirpatterns\",\n # \"patterns specifying possible locations of test files\",\n # type=\"linelist\", default=[\"**/test_*.txt\",\n # \"**/test_*.py\", \"**/*_test.py\"]\n # )\n group = parser.getgroup(\"general\", \"running and selection options\")\n group._addoption(\n \"-x\",\n \"--exitfirst\",\n action=\"store_const\",\n dest=\"maxfail\",\n const=1,\n help=\"exit instantly on first error or failed test.\",\n ),\n group._addoption(\n \"--maxfail\",\n metavar=\"num\",\n action=\"store\",\n type=int,\n dest=\"maxfail\",\n default=0,\n help=\"exit after first num failures or errors.\",\n )\n group._addoption(\n \"--strict\",\n action=\"store_true\",\n help=\"marks not registered in configuration file raise errors.\",\n )\n group._addoption(\n \"-c\",\n metavar=\"file\",\n type=str,\n dest=\"inifilename\",\n help=\"load configuration from `file` instead of trying to locate one of the implicit \"\n \"configuration files.\",\n )\n group._addoption(\n \"--continue-on-collection-errors\",\n action=\"store_true\",\n default=False,\n dest=\"continue_on_collection_errors\",\n help=\"Force test execution even if collection errors occur.\",\n )\n group._addoption(\n \"--rootdir\",\n action=\"store\",\n dest=\"rootdir\",\n help=\"Define root directory for tests. Can be relative path: 'root_dir', './root_dir', \"\n \"'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: \"\n \"'$HOME/root_dir'.\",\n )\n\n group = parser.getgroup(\"collect\", \"collection\")\n group.addoption(\n \"--collectonly\",\n \"--collect-only\",\n action=\"store_true\",\n help=\"only collect tests, don't execute them.\",\n ),\n group.addoption(\n \"--pyargs\",\n action=\"store_true\",\n help=\"try to interpret all arguments as python packages.\",\n )\n group.addoption(\n \"--ignore\",\n action=\"append\",\n metavar=\"path\",\n help=\"ignore path during collection (multi-allowed).\",\n )\n group.addoption(\n \"--ignore-glob\",\n action=\"append\",\n metavar=\"path\",\n help=\"ignore path pattern during collection (multi-allowed).\",\n )\n group.addoption(\n \"--deselect\",\n action=\"append\",\n metavar=\"nodeid_prefix\",\n help=\"deselect item during collection (multi-allowed).\",\n )\n # when changing this to --conf-cut-dir, config.py Conftest.setinitial\n # needs upgrading as well\n group.addoption(\n \"--confcutdir\",\n dest=\"confcutdir\",\n default=None,\n metavar=\"dir\",\n type=functools.partial(directory_arg, optname=\"--confcutdir\"),\n help=\"only load conftest.py's relative to specified dir.\",\n )\n # ... 
other code", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption.None_14_pytest_addoption.None_17": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/main.py_pytest_addoption.None_14_pytest_addoption.None_17", "embedding": null, "metadata": {"file_path": "src/_pytest/main.py", "file_name": "main.py", "file_type": "text/x-python", "category": "implementation", "start_line": 142, "end_line": 175, "span_ids": ["pytest_addoption"], "tokens": 220}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def pytest_addoption(parser):\n # ... other code\n group.addoption(\n \"--noconftest\",\n action=\"store_true\",\n dest=\"noconftest\",\n default=False,\n help=\"Don't load any conftest.py files.\",\n )\n group.addoption(\n \"--keepduplicates\",\n \"--keep-duplicates\",\n action=\"store_true\",\n dest=\"keepduplicates\",\n default=False,\n help=\"Keep duplicate tests.\",\n )\n group.addoption(\n \"--collect-in-virtualenv\",\n action=\"store_true\",\n dest=\"collect_in_virtualenv\",\n default=False,\n help=\"Don't ignore tests in a local virtualenv directory\",\n )\n\n group = parser.getgroup(\"debugconfig\", \"test session debugging and configuration\")\n group.addoption(\n \"--basetemp\",\n dest=\"basetemp\",\n default=None,\n metavar=\"dir\",\n help=(\n \"base temporary directory for this test run.\"\n \"(warning: this directory is removed if it exists)\"\n ),\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py__support_for_skip_xfai_pytest_addoption.parser_addini_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/skipping.py__support_for_skip_xfai_pytest_addoption.parser_addini_", "embedding": null, "metadata": {"file_path": "src/_pytest/skipping.py", "file_name": "skipping.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 29, "span_ids": ["imports", "pytest_addoption", "docstring"], "tokens": 185}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" support for skip/xfail functions and markers. 
\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom _pytest.config import hookimpl\nfrom _pytest.mark.evaluate import MarkEvaluator\nfrom _pytest.outcomes import fail\nfrom _pytest.outcomes import skip\nfrom _pytest.outcomes import xfail\n\n\ndef pytest_addoption(parser):\n group = parser.getgroup(\"general\")\n group.addoption(\n \"--runxfail\",\n action=\"store_true\",\n dest=\"runxfail\",\n default=False,\n help=\"report the results of xfail tests as if they were not marked\",\n )\n\n parser.addini(\n \"xfail_strict\",\n \"default for the strict parameter of xfail \"\n \"markers when not given explicitly (default: False)\",\n default=False,\n type=\"bool\",\n )", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__terminal_reporting_of_REPORT_COLLECTING_RESOLUTION.0_5": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py__terminal_reporting_of_REPORT_COLLECTING_RESOLUTION.0_5", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 1, "end_line": 30, "span_ids": ["imports", "docstring", "impl"], "tokens": 153}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\" terminal reporting of the full testing process.\n\nThis is a good source for looking at the various reporting hooks.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport platform\nimport sys\nimport time\nfrom functools import partial\n\nimport attr\nimport pluggy\nimport py\nimport six\nfrom more_itertools import collapse\n\nimport pytest\nfrom _pytest import nodes\nfrom _pytest.main import EXIT_INTERRUPTED\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.main import EXIT_OK\nfrom _pytest.main import EXIT_TESTSFAILED\nfrom _pytest.main import EXIT_USAGEERROR\n\nREPORT_COLLECTING_RESOLUTION = 0.5", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary._get_pos_TerminalReporter.short_test_summary.if_lines_.for_line_in_lines_.self_write_line_line_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/src/_pytest/terminal.py_TerminalReporter.short_test_summary._get_pos_TerminalReporter.short_test_summary.if_lines_.for_line_in_lines_.self_write_line_line_", "embedding": null, "metadata": {"file_path": "src/_pytest/terminal.py", "file_name": "terminal.py", "file_type": "text/x-python", "category": "implementation", "start_line": 930, "end_line": 954, "span_ids": ["TerminalReporter.short_test_summary"], "tokens": 202}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", 
"last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "class TerminalReporter(object):\n\n def short_test_summary(self):\n # ... other code\n\n def _get_pos(config, rep):\n nodeid = config.cwd_relative_nodeid(rep.nodeid)\n return nodeid\n\n REPORTCHAR_ACTIONS = {\n \"x\": show_xfailed,\n \"X\": show_xpassed,\n \"f\": partial(show_simple, \"failed\"),\n \"F\": partial(show_simple, \"failed\"),\n \"s\": show_skipped,\n \"S\": show_skipped,\n \"p\": partial(show_simple, \"passed\"),\n \"E\": partial(show_simple, \"error\"),\n }\n\n lines = []\n for char in self.reportchars:\n action = REPORTCHAR_ACTIONS.get(char)\n if action: # skipping e.g. \"P\" (passed with output) here.\n action(lines)\n\n if lines:\n self.write_sep(\"=\", \"short test summary info\")\n for line in lines:\n self.write_line(line)", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_bad_markname_test_addmarker_order.assert_extracted_c_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_mark.py_test_parameterset_for_parametrize_bad_markname_test_addmarker_order.assert_extracted_c_", "embedding": null, "metadata": {"file_path": "testing/test_mark.py", "file_name": "test_mark.py", "file_type": "text/x-python", "category": "test", "start_line": 894, "end_line": 940, "span_ids": ["test_mark_expressions_no_smear", "test_addmarker_order", "test_parameterset_for_parametrize_bad_markname"], "tokens": 346}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_parameterset_for_parametrize_bad_markname(testdir):\n with pytest.raises(pytest.UsageError):\n test_parameterset_for_parametrize_marks(testdir, \"bad\")\n\n\ndef test_mark_expressions_no_smear(testdir):\n testdir.makepyfile(\n \"\"\"\n import pytest\n\n class BaseTests(object):\n def test_something(self):\n pass\n\n @pytest.mark.FOO\n class TestFooClass(BaseTests):\n pass\n\n @pytest.mark.BAR\n class TestBarClass(BaseTests):\n pass\n \"\"\"\n )\n\n reprec = testdir.inline_run(\"-m\", \"FOO\")\n passed, skipped, failed = reprec.countoutcomes()\n dlist = reprec.getcalls(\"pytest_deselected\")\n assert passed == 1\n assert skipped == failed == 0\n deselected_tests = dlist[0].items\n assert len(deselected_tests) == 1\n\n # todo: fixed\n # keywords smear - expected behaviour\n # reprec_keywords = testdir.inline_run(\"-k\", \"FOO\")\n # passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes()\n # assert passed_k == 2\n # assert skipped_k == failed_k == 0\n\n\ndef test_addmarker_order():\n node = Node(\"Test\", config=mock.Mock(), session=mock.Mock(), nodeid=\"Test\")\n node.add_marker(\"a\")\n node.add_marker(\"b\")\n node.add_marker(\"c\", append=False)\n extracted = [x.name for x in node.iter_markers()]\n assert extracted == [\"c\", \"a\", \"b\"]", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", 
"metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_trace_after_runpytest_test_trace_after_runpytest.assert_child_exitstatus_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_trace_after_runpytest_test_trace_after_runpytest.assert_child_exitstatus_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 1021, "end_line": 1045, "span_ids": ["test_trace_after_runpytest"], "tokens": 188}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_trace_after_runpytest(testdir):\n \"\"\"Test that debugging's pytest_configure is re-entrant.\"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n from _pytest.debugging import pytestPDB\n\n def test_outer(testdir):\n from _pytest.debugging import pytestPDB\n\n assert len(pytestPDB._saved) == 1\n\n testdir.runpytest(\"-k test_inner\")\n\n __import__('pdb').set_trace()\n\n def test_inner(testdir):\n assert len(pytestPDB._saved) == 2\n \"\"\"\n )\n child = testdir.spawn_pytest(\"-p pytester %s -k test_outer\" % p1)\n child.expect(r\"\\(Pdb\")\n child.sendline(\"c\")\n rest = child.read().decode(\"utf8\")\n TestPDB.flush(child)\n assert child.exitstatus == 0, rest", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdbcls_via_local_module_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_pdb.py_test_pdbcls_via_local_module_", "embedding": null, "metadata": {"file_path": "testing/test_pdb.py", "file_name": "test_pdb.py", "file_type": "text/x-python", "category": "test", "start_line": 1125, "end_line": 1166, "span_ids": ["test_pdbcls_via_local_module"], "tokens": 340}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_pdbcls_via_local_module(testdir):\n \"\"\"It should be imported in pytest_configure or later only.\"\"\"\n p1 = testdir.makepyfile(\n \"\"\"\n def test():\n print(\"before_settrace\")\n __import__(\"pdb\").set_trace()\n \"\"\",\n mypdb=\"\"\"\n class Wrapped:\n class MyPdb:\n def set_trace(self, *args):\n print(\"settrace_called\", args)\n\n def runcall(self, *args, **kwds):\n print(\"runcall_called\", args, kwds)\n assert \"func\" in kwds\n \"\"\",\n )\n result = testdir.runpytest(\n str(p1), \"--pdbcls=really.invalid:Value\", syspathinsert=True\n )\n result.stderr.fnmatch_lines(\n [\n \"ERROR: --pdbcls: could not import 'really.invalid:Value': No module named *really*\"\n ]\n )\n assert result.ret == 4\n\n result = testdir.runpytest(\n str(p1), \"--pdbcls=mypdb:Wrapped.MyPdb\", syspathinsert=True\n )\n assert result.ret == 0\n 
result.stdout.fnmatch_lines([\"*settrace_called*\", \"* 1 passed in *\"])\n\n # Ensure that it also works with --trace.\n result = testdir.runpytest(\n str(p1), \"--pdbcls=mypdb:Wrapped.MyPdb\", \"--trace\", syspathinsert=True\n )\n assert result.ret == 0\n result.stdout.fnmatch_lines([\"*runcall_called*\", \"* 1 passed in *\"])", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_from___future___import_ab_TestEvaluator.test_marked_one_arg.assert_expl_condition": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_skipping.py_from___future___import_ab_TestEvaluator.test_marked_one_arg.assert_expl_condition", "embedding": null, "metadata": {"file_path": "testing/test_skipping.py", "file_name": "test_skipping.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 49, "span_ids": ["TestEvaluator.test_marked_one_arg", "TestEvaluator.test_no_marker", "TestEvaluator", "imports", "TestEvaluator.test_marked_no_args"], "tokens": 298}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport sys\n\nimport pytest\nfrom _pytest.runner import runtestprotocol\nfrom _pytest.skipping import MarkEvaluator\nfrom _pytest.skipping import pytest_runtest_setup\n\n\nclass TestEvaluator(object):\n def test_no_marker(self, testdir):\n item = testdir.getitem(\"def test_func(): pass\")\n evalskipif = MarkEvaluator(item, \"skipif\")\n assert not evalskipif\n assert not evalskipif.istrue()\n\n def test_marked_no_args(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xyz\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"xyz\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"\"\n assert not ev.get(\"run\", False)\n\n def test_marked_one_arg(self, testdir):\n item = testdir.getitem(\n \"\"\"\n import pytest\n @pytest.mark.xyz(\"hasattr(os, 'sep')\")\n def test_func():\n pass\n \"\"\"\n )\n ev = MarkEvaluator(item, \"xyz\")\n assert ev\n assert ev.istrue()\n expl = ev.getexplanation()\n assert expl == \"condition: hasattr(os, 'sep')\"", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py___option.return.request_param": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py___option.return.request_param", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1, "end_line": 52, "span_ids": ["Option", "impl", "Option.args", "docstring", "option", "imports"], "tokens": 285}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", 
"end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "\"\"\"\nterminal reporting of the full testing process.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport sys\nimport textwrap\n\nimport pluggy\nimport py\n\nimport pytest\nfrom _pytest.main import EXIT_NOTESTSCOLLECTED\nfrom _pytest.reports import BaseReport\nfrom _pytest.terminal import _folded_skips\nfrom _pytest.terminal import _plugin_nameversions\nfrom _pytest.terminal import build_summary_stats_line\nfrom _pytest.terminal import getreportopt\nfrom _pytest.terminal import TerminalReporter\n\nDistInfo = collections.namedtuple(\"DistInfo\", [\"project_name\", \"version\"])\n\n\nclass Option(object):\n def __init__(self, verbosity=0, fulltrace=False):\n self.verbosity = verbosity\n self.fulltrace = fulltrace\n\n @property\n def args(self):\n values = []\n values.append(\"--verbosity=%d\" % self.verbosity)\n if self.fulltrace:\n values.append(\"--fulltrace\")\n return values\n\n\n@pytest.fixture(\n params=[\n Option(verbosity=0),\n Option(verbosity=1),\n Option(verbosity=-1),\n Option(fulltrace=True),\n ],\n ids=[\"default\", \"verbose\", \"quiet\", \"fulltrace\"],\n)\ndef option(request):\n return request.param", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fail_extra_reporting_test_pass_reporting_on_fail.assert_short_test_summar": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_fail_extra_reporting_test_pass_reporting_on_fail.assert_short_test_summar", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 761, "end_line": 786, "span_ids": ["test_fail_reporting_on_pass", "test_pass_extra_reporting", "test_pass_reporting_on_fail", "test_fail_extra_reporting"], "tokens": 244}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_fail_extra_reporting(testdir):\n testdir.makepyfile(\"def test_this(): assert 0\")\n result = testdir.runpytest()\n assert \"short test summary\" not in result.stdout.str()\n result = testdir.runpytest(\"-rf\")\n result.stdout.fnmatch_lines([\"*test summary*\", \"FAIL*test_fail_extra_reporting*\"])\n\n\ndef test_fail_reporting_on_pass(testdir):\n testdir.makepyfile(\"def test_this(): assert 1\")\n result = testdir.runpytest(\"-rf\")\n assert \"short test summary\" not in result.stdout.str()\n\n\ndef test_pass_extra_reporting(testdir):\n testdir.makepyfile(\"def test_this(): assert 1\")\n result = testdir.runpytest()\n assert \"short test summary\" not in result.stdout.str()\n result = testdir.runpytest(\"-rp\")\n result.stdout.fnmatch_lines([\"*test summary*\", \"PASS*test_pass_extra_reporting*\"])\n\n\ndef test_pass_reporting_on_fail(testdir):\n testdir.makepyfile(\"def test_this(): assert 0\")\n result = 
testdir.runpytest(\"-rp\")\n assert \"short test summary\" not in result.stdout.str()", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}, "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_reasons_folding_": {"__data__": {"id_": "/tmp/repos/swe-bench_pytest-dev__pytest/testing/test_terminal.py_test_skip_reasons_folding_", "embedding": null, "metadata": {"file_path": "testing/test_terminal.py", "file_name": "test_terminal.py", "file_type": "text/x-python", "category": "test", "start_line": 1578, "end_line": 1610, "span_ids": ["test_skip_reasons_folding", "test_skip_reasons_folding.X", "test_skip_reasons_folding.X:2"], "tokens": 215}, "excluded_embed_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date", "start_line", "end_line", "tokens"], "excluded_llm_metadata_keys": ["file_name", "file_type", "file_size", "creation_date", "last_modified_date", "last_accessed_date"], "relationships": {}, "text": "def test_skip_reasons_folding():\n path = \"xyz\"\n lineno = 3\n message = \"justso\"\n longrepr = (path, lineno, message)\n\n class X(object):\n pass\n\n ev1 = X()\n ev1.when = \"execute\"\n ev1.skipped = True\n ev1.longrepr = longrepr\n\n ev2 = X()\n ev2.when = \"execute\"\n ev2.longrepr = longrepr\n ev2.skipped = True\n\n # ev3 might be a collection report\n ev3 = X()\n ev3.when = \"collect\"\n ev3.longrepr = longrepr\n ev3.skipped = True\n\n values = _folded_skips([ev1, ev2, ev3])\n assert len(values) == 1\n num, fspath, lineno, reason = values[0]\n assert num == 3\n assert fspath == path\n assert lineno == lineno\n assert reason == message", "start_char_idx": null, "end_char_idx": null, "text_template": "{metadata_str}\n\n{content}", "metadata_template": "{key}: {value}", "metadata_seperator": "\n", "class_name": "TextNode"}, "__type__": "1"}}} \ No newline at end of file