Spaces:
Running
Running
Upload 7228 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +18 -0
- llm/Include/site/python3.11/greenlet/greenlet.h +164 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/AUTHORS +58 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/INSTALLER +1 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/LICENSE +29 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/METADATA +297 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/RECORD +82 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/WHEEL +5 -0
- llm/Lib/site-packages/GitPython-3.1.43.dist-info/top_level.txt +1 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/INSTALLER +1 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst +28 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/METADATA +105 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/RECORD +58 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/WHEEL +5 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt +2 -0
- llm/Lib/site-packages/Jinja2-3.1.3.dist-info/top_level.txt +1 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/INSTALLER +1 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/LICENSE +201 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/METADATA +378 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/RECORD +164 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/REQUESTED +0 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/WHEEL +5 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/entry_points.txt +5 -0
- llm/Lib/site-packages/accelerate-0.29.3.dist-info/top_level.txt +1 -0
- llm/Lib/site-packages/accelerate/__init__.py +48 -0
- llm/Lib/site-packages/accelerate/__pycache__/__init__.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/accelerator.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/big_modeling.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/checkpointing.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/data_loader.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/hooks.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/inference.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/launchers.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/local_sgd.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/logging.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/memory_utils.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/optimizer.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/scheduler.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/state.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/__pycache__/tracking.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/accelerator.py +0 -0
- llm/Lib/site-packages/accelerate/big_modeling.py +627 -0
- llm/Lib/site-packages/accelerate/checkpointing.py +275 -0
- llm/Lib/site-packages/accelerate/commands/__init__.py +13 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/__init__.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/env.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/estimate.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/launch.cpython-311.pyc +0 -0
- llm/Lib/site-packages/accelerate/commands/__pycache__/test.cpython-311.pyc +0 -0
.gitattributes
CHANGED
@@ -34,3 +34,21 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
data/Processed[[:space:]]Data.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
data/Processed[[:space:]]Data.pdf filter=lfs diff=lfs merge=lfs -text
|
37 |
+
llm/Lib/site-packages/altair/vegalite/v5/schema/__pycache__/channels.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
38 |
+
llm/Lib/site-packages/altair/vegalite/v5/schema/__pycache__/core.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
39 |
+
llm/Lib/site-packages/cassandra/cluster.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
40 |
+
llm/Lib/site-packages/ctransformers/lib/avx/ctransformers.dll filter=lfs diff=lfs merge=lfs -text
|
41 |
+
llm/Lib/site-packages/ctransformers/lib/avx/libctransformers.dylib filter=lfs diff=lfs merge=lfs -text
|
42 |
+
llm/Lib/site-packages/ctransformers/lib/avx/libctransformers.so filter=lfs diff=lfs merge=lfs -text
|
43 |
+
llm/Lib/site-packages/ctransformers/lib/avx2/ctransformers.dll filter=lfs diff=lfs merge=lfs -text
|
44 |
+
llm/Lib/site-packages/ctransformers/lib/avx2/libctransformers.dylib filter=lfs diff=lfs merge=lfs -text
|
45 |
+
llm/Lib/site-packages/ctransformers/lib/avx2/libctransformers.so filter=lfs diff=lfs merge=lfs -text
|
46 |
+
llm/Lib/site-packages/ctransformers/lib/basic/ctransformers.dll filter=lfs diff=lfs merge=lfs -text
|
47 |
+
llm/Lib/site-packages/ctransformers/lib/basic/libctransformers.dylib filter=lfs diff=lfs merge=lfs -text
|
48 |
+
llm/Lib/site-packages/ctransformers/lib/basic/libctransformers.so filter=lfs diff=lfs merge=lfs -text
|
49 |
+
llm/Lib/site-packages/ctransformers/lib/cuda/ctransformers.dll filter=lfs diff=lfs merge=lfs -text
|
50 |
+
llm/Lib/site-packages/ctransformers/lib/cuda/libctransformers.so filter=lfs diff=lfs merge=lfs -text
|
51 |
+
llm/Lib/site-packages/faiss_cpu.libs/flang-d38962844214aa9b06fc3989f9adae5b.dll filter=lfs diff=lfs merge=lfs -text
|
52 |
+
llm/Lib/site-packages/faiss_cpu.libs/openblas-1ba25ee8d70fa3c45ede15bdc95fbee3.dll filter=lfs diff=lfs merge=lfs -text
|
53 |
+
llm/Lib/site-packages/faiss/_swigfaiss_avx2.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
54 |
+
llm/Lib/site-packages/faiss/_swigfaiss.cp311-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text
|
llm/Include/site/python3.11/greenlet/greenlet.h
ADDED
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
|
2 |
+
|
3 |
+
/* Greenlet object interface */
|
4 |
+
|
5 |
+
#ifndef Py_GREENLETOBJECT_H
|
6 |
+
#define Py_GREENLETOBJECT_H
|
7 |
+
|
8 |
+
|
9 |
+
#include <Python.h>
|
10 |
+
|
11 |
+
#ifdef __cplusplus
|
12 |
+
extern "C" {
|
13 |
+
#endif
|
14 |
+
|
15 |
+
/* This is deprecated and undocumented. It does not change. */
|
16 |
+
#define GREENLET_VERSION "1.0.0"
|
17 |
+
|
18 |
+
#ifndef GREENLET_MODULE
|
19 |
+
#define implementation_ptr_t void*
|
20 |
+
#endif
|
21 |
+
|
22 |
+
typedef struct _greenlet {
|
23 |
+
PyObject_HEAD
|
24 |
+
PyObject* weakreflist;
|
25 |
+
PyObject* dict;
|
26 |
+
implementation_ptr_t pimpl;
|
27 |
+
} PyGreenlet;
|
28 |
+
|
29 |
+
#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type))
|
30 |
+
|
31 |
+
|
32 |
+
/* C API functions */
|
33 |
+
|
34 |
+
/* Total number of symbols that are exported */
|
35 |
+
#define PyGreenlet_API_pointers 12
|
36 |
+
|
37 |
+
#define PyGreenlet_Type_NUM 0
|
38 |
+
#define PyExc_GreenletError_NUM 1
|
39 |
+
#define PyExc_GreenletExit_NUM 2
|
40 |
+
|
41 |
+
#define PyGreenlet_New_NUM 3
|
42 |
+
#define PyGreenlet_GetCurrent_NUM 4
|
43 |
+
#define PyGreenlet_Throw_NUM 5
|
44 |
+
#define PyGreenlet_Switch_NUM 6
|
45 |
+
#define PyGreenlet_SetParent_NUM 7
|
46 |
+
|
47 |
+
#define PyGreenlet_MAIN_NUM 8
|
48 |
+
#define PyGreenlet_STARTED_NUM 9
|
49 |
+
#define PyGreenlet_ACTIVE_NUM 10
|
50 |
+
#define PyGreenlet_GET_PARENT_NUM 11
|
51 |
+
|
52 |
+
#ifndef GREENLET_MODULE
|
53 |
+
/* This section is used by modules that uses the greenlet C API */
|
54 |
+
static void** _PyGreenlet_API = NULL;
|
55 |
+
|
56 |
+
# define PyGreenlet_Type \
|
57 |
+
(*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM])
|
58 |
+
|
59 |
+
# define PyExc_GreenletError \
|
60 |
+
((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM])
|
61 |
+
|
62 |
+
# define PyExc_GreenletExit \
|
63 |
+
((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM])
|
64 |
+
|
65 |
+
/*
|
66 |
+
* PyGreenlet_New(PyObject *args)
|
67 |
+
*
|
68 |
+
* greenlet.greenlet(run, parent=None)
|
69 |
+
*/
|
70 |
+
# define PyGreenlet_New \
|
71 |
+
(*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \
|
72 |
+
_PyGreenlet_API[PyGreenlet_New_NUM])
|
73 |
+
|
74 |
+
/*
|
75 |
+
* PyGreenlet_GetCurrent(void)
|
76 |
+
*
|
77 |
+
* greenlet.getcurrent()
|
78 |
+
*/
|
79 |
+
# define PyGreenlet_GetCurrent \
|
80 |
+
(*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM])
|
81 |
+
|
82 |
+
/*
|
83 |
+
* PyGreenlet_Throw(
|
84 |
+
* PyGreenlet *greenlet,
|
85 |
+
* PyObject *typ,
|
86 |
+
* PyObject *val,
|
87 |
+
* PyObject *tb)
|
88 |
+
*
|
89 |
+
* g.throw(...)
|
90 |
+
*/
|
91 |
+
# define PyGreenlet_Throw \
|
92 |
+
(*(PyObject * (*)(PyGreenlet * self, \
|
93 |
+
PyObject * typ, \
|
94 |
+
PyObject * val, \
|
95 |
+
PyObject * tb)) \
|
96 |
+
_PyGreenlet_API[PyGreenlet_Throw_NUM])
|
97 |
+
|
98 |
+
/*
|
99 |
+
* PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args)
|
100 |
+
*
|
101 |
+
* g.switch(*args, **kwargs)
|
102 |
+
*/
|
103 |
+
# define PyGreenlet_Switch \
|
104 |
+
(*(PyObject * \
|
105 |
+
(*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \
|
106 |
+
_PyGreenlet_API[PyGreenlet_Switch_NUM])
|
107 |
+
|
108 |
+
/*
|
109 |
+
* PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent)
|
110 |
+
*
|
111 |
+
* g.parent = new_parent
|
112 |
+
*/
|
113 |
+
# define PyGreenlet_SetParent \
|
114 |
+
(*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \
|
115 |
+
_PyGreenlet_API[PyGreenlet_SetParent_NUM])
|
116 |
+
|
117 |
+
/*
|
118 |
+
* PyGreenlet_GetParent(PyObject* greenlet)
|
119 |
+
*
|
120 |
+
* return greenlet.parent;
|
121 |
+
*
|
122 |
+
* This could return NULL even if there is no exception active.
|
123 |
+
* If it does not return NULL, you are responsible for decrementing the
|
124 |
+
* reference count.
|
125 |
+
*/
|
126 |
+
# define PyGreenlet_GetParent \
|
127 |
+
(*(PyGreenlet* (*)(PyGreenlet*)) \
|
128 |
+
_PyGreenlet_API[PyGreenlet_GET_PARENT_NUM])
|
129 |
+
|
130 |
+
/*
|
131 |
+
* deprecated, undocumented alias.
|
132 |
+
*/
|
133 |
+
# define PyGreenlet_GET_PARENT PyGreenlet_GetParent
|
134 |
+
|
135 |
+
# define PyGreenlet_MAIN \
|
136 |
+
(*(int (*)(PyGreenlet*)) \
|
137 |
+
_PyGreenlet_API[PyGreenlet_MAIN_NUM])
|
138 |
+
|
139 |
+
# define PyGreenlet_STARTED \
|
140 |
+
(*(int (*)(PyGreenlet*)) \
|
141 |
+
_PyGreenlet_API[PyGreenlet_STARTED_NUM])
|
142 |
+
|
143 |
+
# define PyGreenlet_ACTIVE \
|
144 |
+
(*(int (*)(PyGreenlet*)) \
|
145 |
+
_PyGreenlet_API[PyGreenlet_ACTIVE_NUM])
|
146 |
+
|
147 |
+
|
148 |
+
|
149 |
+
|
150 |
+
/* Macro that imports greenlet and initializes C API */
|
151 |
+
/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we
|
152 |
+
keep the older definition to be sure older code that might have a copy of
|
153 |
+
the header still works. */
|
154 |
+
# define PyGreenlet_Import() \
|
155 |
+
{ \
|
156 |
+
_PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \
|
157 |
+
}
|
158 |
+
|
159 |
+
#endif /* GREENLET_MODULE */
|
160 |
+
|
161 |
+
#ifdef __cplusplus
|
162 |
+
}
|
163 |
+
#endif
|
164 |
+
#endif /* !Py_GREENLETOBJECT_H */
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/AUTHORS
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GitPython was originally written by Michael Trier.
|
2 |
+
GitPython 0.2 was partially (re)written by Sebastian Thiel, based on 0.1.6 and git-dulwich.
|
3 |
+
|
4 |
+
Contributors are:
|
5 |
+
|
6 |
+
-Michael Trier <mtrier _at_ gmail.com>
|
7 |
+
-Alan Briolat
|
8 |
+
-Florian Apolloner <florian _at_ apolloner.eu>
|
9 |
+
-David Aguilar <davvid _at_ gmail.com>
|
10 |
+
-Jelmer Vernooij <jelmer _at_ samba.org>
|
11 |
+
-Steve Frécinaux <code _at_ istique.net>
|
12 |
+
-Kai Lautaportti <kai _at_ lautaportti.fi>
|
13 |
+
-Paul Sowden <paul _at_ idontsmoke.co.uk>
|
14 |
+
-Sebastian Thiel <byronimo _at_ gmail.com>
|
15 |
+
-Jonathan Chu <jonathan.chu _at_ me.com>
|
16 |
+
-Vincent Driessen <me _at_ nvie.com>
|
17 |
+
-Phil Elson <pelson _dot_ pub _at_ gmail.com>
|
18 |
+
-Bernard `Guyzmo` Pratz <[email protected]>
|
19 |
+
-Timothy B. Hartman <tbhartman _at_ gmail.com>
|
20 |
+
-Konstantin Popov <konstantin.popov.89 _at_ yandex.ru>
|
21 |
+
-Peter Jones <pjones _at_ redhat.com>
|
22 |
+
-Anson Mansfield <anson.mansfield _at_ gmail.com>
|
23 |
+
-Ken Odegard <ken.odegard _at_ gmail.com>
|
24 |
+
-Alexis Horgix Chotard
|
25 |
+
-Piotr Babij <piotr.babij _at_ gmail.com>
|
26 |
+
-Mikuláš Poul <mikulaspoul _at_ gmail.com>
|
27 |
+
-Charles Bouchard-Légaré <cblegare.atl _at_ ntis.ca>
|
28 |
+
-Yaroslav Halchenko <debian _at_ onerussian.com>
|
29 |
+
-Tim Swast <swast _at_ google.com>
|
30 |
+
-William Luc Ritchie
|
31 |
+
-David Host <hostdm _at_ outlook.com>
|
32 |
+
-A. Jesse Jiryu Davis <jesse _at_ emptysquare.net>
|
33 |
+
-Steven Whitman <ninloot _at_ gmail.com>
|
34 |
+
-Stefan Stancu <stefan.stancu _at_ gmail.com>
|
35 |
+
-César Izurieta <cesar _at_ caih.org>
|
36 |
+
-Arthur Milchior <arthur _at_ milchior.fr>
|
37 |
+
-Anil Khatri <anil.soccer.khatri _at_ gmail.com>
|
38 |
+
-JJ Graham <thetwoj _at_ gmail.com>
|
39 |
+
-Ben Thayer <ben _at_ benthayer.com>
|
40 |
+
-Dries Kennes <admin _at_ dries007.net>
|
41 |
+
-Pratik Anurag <panurag247365 _at_ gmail.com>
|
42 |
+
-Harmon <harmon.public _at_ gmail.com>
|
43 |
+
-Liam Beguin <liambeguin _at_ gmail.com>
|
44 |
+
-Ram Rachum <ram _at_ rachum.com>
|
45 |
+
-Alba Mendez <me _at_ alba.sh>
|
46 |
+
-Robert Westman <robert _at_ byteflux.io>
|
47 |
+
-Hugo van Kemenade
|
48 |
+
-Hiroki Tokunaga <tokusan441 _at_ gmail.com>
|
49 |
+
-Julien Mauroy <pro.julien.mauroy _at_ gmail.com>
|
50 |
+
-Patrick Gerard
|
51 |
+
-Luke Twist <[email protected]>
|
52 |
+
-Joseph Hale <me _at_ jhale.dev>
|
53 |
+
-Santos Gallegos <stsewd _at_ proton.me>
|
54 |
+
-Wenhan Zhu <wzhu.cosmos _at_ gmail.com>
|
55 |
+
-Eliah Kagan <eliah.kagan _at_ gmail.com>
|
56 |
+
-Ethan Lin <et.repositories _at_ gmail.com>
|
57 |
+
|
58 |
+
Portions derived from other open source works and are clearly marked.
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/LICENSE
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (C) 2008, 2009 Michael Trier and contributors
|
2 |
+
All rights reserved.
|
3 |
+
|
4 |
+
Redistribution and use in source and binary forms, with or without
|
5 |
+
modification, are permitted provided that the following conditions
|
6 |
+
are met:
|
7 |
+
|
8 |
+
* Redistributions of source code must retain the above copyright
|
9 |
+
notice, this list of conditions and the following disclaimer.
|
10 |
+
|
11 |
+
* Redistributions in binary form must reproduce the above copyright
|
12 |
+
notice, this list of conditions and the following disclaimer in the
|
13 |
+
documentation and/or other materials provided with the distribution.
|
14 |
+
|
15 |
+
* Neither the name of the GitPython project nor the names of
|
16 |
+
its contributors may be used to endorse or promote products derived
|
17 |
+
from this software without specific prior written permission.
|
18 |
+
|
19 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
20 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
21 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
22 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
23 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
24 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
25 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
26 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
27 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
28 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
29 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/METADATA
ADDED
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: GitPython
|
3 |
+
Version: 3.1.43
|
4 |
+
Summary: GitPython is a Python library used to interact with Git repositories
|
5 |
+
Home-page: https://github.com/gitpython-developers/GitPython
|
6 |
+
Author: Sebastian Thiel, Michael Trier
|
7 |
+
Author-email: [email protected], [email protected]
|
8 |
+
License: BSD-3-Clause
|
9 |
+
Classifier: Development Status :: 5 - Production/Stable
|
10 |
+
Classifier: Environment :: Console
|
11 |
+
Classifier: Intended Audience :: Developers
|
12 |
+
Classifier: License :: OSI Approved :: BSD License
|
13 |
+
Classifier: Operating System :: OS Independent
|
14 |
+
Classifier: Operating System :: POSIX
|
15 |
+
Classifier: Operating System :: Microsoft :: Windows
|
16 |
+
Classifier: Operating System :: MacOS :: MacOS X
|
17 |
+
Classifier: Typing :: Typed
|
18 |
+
Classifier: Programming Language :: Python
|
19 |
+
Classifier: Programming Language :: Python :: 3
|
20 |
+
Classifier: Programming Language :: Python :: 3.7
|
21 |
+
Classifier: Programming Language :: Python :: 3.8
|
22 |
+
Classifier: Programming Language :: Python :: 3.9
|
23 |
+
Classifier: Programming Language :: Python :: 3.10
|
24 |
+
Classifier: Programming Language :: Python :: 3.11
|
25 |
+
Classifier: Programming Language :: Python :: 3.12
|
26 |
+
Requires-Python: >=3.7
|
27 |
+
Description-Content-Type: text/markdown
|
28 |
+
License-File: LICENSE
|
29 |
+
License-File: AUTHORS
|
30 |
+
Requires-Dist: gitdb <5,>=4.0.1
|
31 |
+
Requires-Dist: typing-extensions >=3.7.4.3 ; python_version < "3.8"
|
32 |
+
Provides-Extra: doc
|
33 |
+
Requires-Dist: sphinx ==4.3.2 ; extra == 'doc'
|
34 |
+
Requires-Dist: sphinx-rtd-theme ; extra == 'doc'
|
35 |
+
Requires-Dist: sphinxcontrib-applehelp <=1.0.4,>=1.0.2 ; extra == 'doc'
|
36 |
+
Requires-Dist: sphinxcontrib-devhelp ==1.0.2 ; extra == 'doc'
|
37 |
+
Requires-Dist: sphinxcontrib-htmlhelp <=2.0.1,>=2.0.0 ; extra == 'doc'
|
38 |
+
Requires-Dist: sphinxcontrib-qthelp ==1.0.3 ; extra == 'doc'
|
39 |
+
Requires-Dist: sphinxcontrib-serializinghtml ==1.1.5 ; extra == 'doc'
|
40 |
+
Requires-Dist: sphinx-autodoc-typehints ; extra == 'doc'
|
41 |
+
Provides-Extra: test
|
42 |
+
Requires-Dist: coverage[toml] ; extra == 'test'
|
43 |
+
Requires-Dist: ddt !=1.4.3,>=1.1.1 ; extra == 'test'
|
44 |
+
Requires-Dist: mypy ; extra == 'test'
|
45 |
+
Requires-Dist: pre-commit ; extra == 'test'
|
46 |
+
Requires-Dist: pytest >=7.3.1 ; extra == 'test'
|
47 |
+
Requires-Dist: pytest-cov ; extra == 'test'
|
48 |
+
Requires-Dist: pytest-instafail ; extra == 'test'
|
49 |
+
Requires-Dist: pytest-mock ; extra == 'test'
|
50 |
+
Requires-Dist: pytest-sugar ; extra == 'test'
|
51 |
+
Requires-Dist: typing-extensions ; (python_version < "3.11") and extra == 'test'
|
52 |
+
Requires-Dist: mock ; (python_version < "3.8") and extra == 'test'
|
53 |
+
|
54 |
+
![Python package](https://github.com/gitpython-developers/GitPython/workflows/Python%20package/badge.svg)
|
55 |
+
[![Documentation Status](https://readthedocs.org/projects/gitpython/badge/?version=stable)](https://readthedocs.org/projects/gitpython/?badge=stable)
|
56 |
+
[![Packaging status](https://repology.org/badge/tiny-repos/python:gitpython.svg)](https://repology.org/metapackage/python:gitpython/versions)
|
57 |
+
|
58 |
+
## [Gitoxide](https://github.com/Byron/gitoxide): A peek into the future…
|
59 |
+
|
60 |
+
I started working on GitPython in 2009, back in the days when Python was 'my thing' and I had great plans with it.
|
61 |
+
Of course, back in the days, I didn't really know what I was doing and this shows in many places. Somewhat similar to
|
62 |
+
Python this happens to be 'good enough', but at the same time is deeply flawed and broken beyond repair.
|
63 |
+
|
64 |
+
By now, GitPython is widely used and I am sure there is a good reason for that, it's something to be proud of and happy about.
|
65 |
+
The community is maintaining the software and is keeping it relevant for which I am absolutely grateful. For the time to come I am happy to continue maintaining GitPython, remaining hopeful that one day it won't be needed anymore.
|
66 |
+
|
67 |
+
More than 15 years after my first meeting with 'git' I am still in excited about it, and am happy to finally have the tools and
|
68 |
+
probably the skills to scratch that itch of mine: implement `git` in a way that makes tool creation a piece of cake for most.
|
69 |
+
|
70 |
+
If you like the idea and want to learn more, please head over to [gitoxide](https://github.com/Byron/gitoxide), an
|
71 |
+
implementation of 'git' in [Rust](https://www.rust-lang.org).
|
72 |
+
|
73 |
+
*(Please note that `gitoxide` is not currently available for use in Python, and that Rust is required.)*
|
74 |
+
|
75 |
+
## GitPython
|
76 |
+
|
77 |
+
GitPython is a python library used to interact with git repositories, high-level like git-porcelain,
|
78 |
+
or low-level like git-plumbing.
|
79 |
+
|
80 |
+
It provides abstractions of git objects for easy access of repository data often backed by calling the `git`
|
81 |
+
command-line program.
|
82 |
+
|
83 |
+
### DEVELOPMENT STATUS
|
84 |
+
|
85 |
+
This project is in **maintenance mode**, which means that
|
86 |
+
|
87 |
+
- …there will be no feature development, unless these are contributed
|
88 |
+
- …there will be no bug fixes, unless they are relevant to the safety of users, or contributed
|
89 |
+
- …issues will be responded to with waiting times of up to a month
|
90 |
+
|
91 |
+
The project is open to contributions of all kinds, as well as new maintainers.
|
92 |
+
|
93 |
+
### REQUIREMENTS
|
94 |
+
|
95 |
+
GitPython needs the `git` executable to be installed on the system and available in your
|
96 |
+
`PATH` for most operations. If it is not in your `PATH`, you can help GitPython find it
|
97 |
+
by setting the `GIT_PYTHON_GIT_EXECUTABLE=<path/to/git>` environment variable.
|
98 |
+
|
99 |
+
- Git (1.7.x or newer)
|
100 |
+
- Python >= 3.7
|
101 |
+
|
102 |
+
The list of dependencies are listed in `./requirements.txt` and `./test-requirements.txt`.
|
103 |
+
The installer takes care of installing them for you.
|
104 |
+
|
105 |
+
### INSTALL
|
106 |
+
|
107 |
+
GitPython and its required package dependencies can be installed in any of the following ways, all of which should typically be done in a [virtual environment](https://docs.python.org/3/tutorial/venv.html).
|
108 |
+
|
109 |
+
#### From PyPI
|
110 |
+
|
111 |
+
To obtain and install a copy [from PyPI](https://pypi.org/project/GitPython/), run:
|
112 |
+
|
113 |
+
```sh
|
114 |
+
pip install GitPython
|
115 |
+
```
|
116 |
+
|
117 |
+
(A distribution package can also be downloaded for manual installation at [the PyPI page](https://pypi.org/project/GitPython/).)
|
118 |
+
|
119 |
+
#### From downloaded source code
|
120 |
+
|
121 |
+
If you have downloaded the source code, run this from inside the unpacked `GitPython` directory:
|
122 |
+
|
123 |
+
```sh
|
124 |
+
pip install .
|
125 |
+
```
|
126 |
+
|
127 |
+
#### By cloning the source code repository
|
128 |
+
|
129 |
+
To clone the [the GitHub repository](https://github.com/gitpython-developers/GitPython) from source to work on the code, you can do it like so:
|
130 |
+
|
131 |
+
```sh
|
132 |
+
git clone https://github.com/gitpython-developers/GitPython
|
133 |
+
cd GitPython
|
134 |
+
./init-tests-after-clone.sh
|
135 |
+
```
|
136 |
+
|
137 |
+
On Windows, `./init-tests-after-clone.sh` can be run in a Git Bash shell.
|
138 |
+
|
139 |
+
If you are cloning [your own fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/about-forks), then replace the above `git clone` command with one that gives the URL of your fork. Or use this [`gh`](https://cli.github.com/) command (assuming you have `gh` and your fork is called `GitPython`):
|
140 |
+
|
141 |
+
```sh
|
142 |
+
gh repo clone GitPython
|
143 |
+
```
|
144 |
+
|
145 |
+
Having cloned the repo, create and activate your [virtual environment](https://docs.python.org/3/tutorial/venv.html).
|
146 |
+
|
147 |
+
Then make an [editable install](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs):
|
148 |
+
|
149 |
+
```sh
|
150 |
+
pip install -e ".[test]"
|
151 |
+
```
|
152 |
+
|
153 |
+
In the less common case that you do not want to install test dependencies, `pip install -e .` can be used instead.
|
154 |
+
|
155 |
+
#### With editable *dependencies* (not preferred, and rarely needed)
|
156 |
+
|
157 |
+
In rare cases, you may want to work on GitPython and one or both of its [gitdb](https://github.com/gitpython-developers/gitdb) and [smmap](https://github.com/gitpython-developers/smmap) dependencies at the same time, with changes in your local working copy of gitdb or smmap immediatley reflected in the behavior of your local working copy of GitPython. This can be done by making editable installations of those dependencies in the same virtual environment where you install GitPython.
|
158 |
+
|
159 |
+
If you want to do that *and* you want the versions in GitPython's git submodules to be used, then pass `-e git/ext/gitdb` and/or `-e git/ext/gitdb/gitdb/ext/smmap` to `pip install`. This can be done in any order, and in separate `pip install` commands or the same one, so long as `-e` appears before *each* path. For example, you can install GitPython, gitdb, and smmap editably in the currently active virtual environment this way:
|
160 |
+
|
161 |
+
```sh
|
162 |
+
pip install -e ".[test]" -e git/ext/gitdb -e git/ext/gitdb/gitdb/ext/smmap
|
163 |
+
```
|
164 |
+
|
165 |
+
The submodules must have been cloned for that to work, but that will already be the case if you have run `./init-tests-after-clone.sh`. You can use `pip list` to check which packages are installed editably and which are installed normally.
|
166 |
+
|
167 |
+
To reiterate, this approach should only rarely be used. For most development it is preferable to allow the gitdb and smmap dependencices to be retrieved automatically from PyPI in their latest stable packaged versions.
|
168 |
+
|
169 |
+
### Limitations
|
170 |
+
|
171 |
+
#### Leakage of System Resources
|
172 |
+
|
173 |
+
GitPython is not suited for long-running processes (like daemons) as it tends to
|
174 |
+
leak system resources. It was written in a time where destructors (as implemented
|
175 |
+
in the `__del__` method) still ran deterministically.
|
176 |
+
|
177 |
+
In case you still want to use it in such a context, you will want to search the
|
178 |
+
codebase for `__del__` implementations and call these yourself when you see fit.
|
179 |
+
|
180 |
+
Another way assure proper cleanup of resources is to factor out GitPython into a
|
181 |
+
separate process which can be dropped periodically.
|
182 |
+
|
183 |
+
#### Windows support
|
184 |
+
|
185 |
+
See [Issue #525](https://github.com/gitpython-developers/GitPython/issues/525).
|
186 |
+
|
187 |
+
### RUNNING TESTS
|
188 |
+
|
189 |
+
_Important_: Right after cloning this repository, please be sure to have executed
|
190 |
+
the `./init-tests-after-clone.sh` script in the repository root. Otherwise
|
191 |
+
you will encounter test failures.
|
192 |
+
|
193 |
+
#### Install test dependencies
|
194 |
+
|
195 |
+
Ensure testing libraries are installed. This is taken care of already if you installed with:
|
196 |
+
|
197 |
+
```sh
|
198 |
+
pip install -e ".[test]"
|
199 |
+
```
|
200 |
+
|
201 |
+
If you had installed with a command like `pip install -e .` instead, you can still run
|
202 |
+
the above command to add the testing dependencies.
|
203 |
+
|
204 |
+
#### Test commands
|
205 |
+
|
206 |
+
To test, run:
|
207 |
+
|
208 |
+
```sh
|
209 |
+
pytest
|
210 |
+
```
|
211 |
+
|
212 |
+
To lint, and apply some linting fixes as well as automatic code formatting, run:
|
213 |
+
|
214 |
+
```sh
|
215 |
+
pre-commit run --all-files
|
216 |
+
```
|
217 |
+
|
218 |
+
This includes the linting and autoformatting done by Ruff, as well as some other checks.
|
219 |
+
|
220 |
+
To typecheck, run:
|
221 |
+
|
222 |
+
```sh
|
223 |
+
mypy
|
224 |
+
```
|
225 |
+
|
226 |
+
#### CI (and tox)
|
227 |
+
|
228 |
+
Style and formatting checks, and running tests on all the different supported Python versions, will be performed:
|
229 |
+
|
230 |
+
- Upon submitting a pull request.
|
231 |
+
- On each push, *if* you have a fork with GitHub Actions enabled.
|
232 |
+
- Locally, if you run [`tox`](https://tox.wiki/) (this skips any Python versions you don't have installed).
|
233 |
+
|
234 |
+
#### Configuration files
|
235 |
+
|
236 |
+
Specific tools are all configured in the `./pyproject.toml` file:
|
237 |
+
|
238 |
+
- `pytest` (test runner)
|
239 |
+
- `coverage.py` (code coverage)
|
240 |
+
- `ruff` (linter and formatter)
|
241 |
+
- `mypy` (type checker)
|
242 |
+
|
243 |
+
Orchestration tools:
|
244 |
+
|
245 |
+
- Configuration for `pre-commit` is in the `./.pre-commit-config.yaml` file.
|
246 |
+
- Configuration for `tox` is in `./tox.ini`.
|
247 |
+
- Configuration for GitHub Actions (CI) is in files inside `./.github/workflows/`.
|
248 |
+
|
249 |
+
### Contributions
|
250 |
+
|
251 |
+
Please have a look at the [contributions file][contributing].
|
252 |
+
|
253 |
+
### INFRASTRUCTURE
|
254 |
+
|
255 |
+
- [User Documentation](http://gitpython.readthedocs.org)
|
256 |
+
- [Questions and Answers](http://stackexchange.com/filters/167317/gitpython)
|
257 |
+
- Please post on Stack Overflow and use the `gitpython` tag
|
258 |
+
- [Issue Tracker](https://github.com/gitpython-developers/GitPython/issues)
|
259 |
+
- Post reproducible bugs and feature requests as a new issue.
|
260 |
+
Please be sure to provide the following information if posting bugs:
|
261 |
+
- GitPython version (e.g. `import git; git.__version__`)
|
262 |
+
- Python version (e.g. `python --version`)
|
263 |
+
- The encountered stack-trace, if applicable
|
264 |
+
- Enough information to allow reproducing the issue
|
265 |
+
|
266 |
+
### How to make a new release
|
267 |
+
|
268 |
+
1. Update/verify the **version** in the `VERSION` file.
|
269 |
+
2. Update/verify that the `doc/source/changes.rst` changelog file was updated. It should include a link to the forthcoming release page: `https://github.com/gitpython-developers/GitPython/releases/tag/<version>`
|
270 |
+
3. Commit everything.
|
271 |
+
4. Run `git tag -s <version>` to tag the version in Git.
|
272 |
+
5. _Optionally_ create and activate a [virtual environment](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/#creating-a-virtual-environment). (Then the next step can install `build` and `twine`.)
|
273 |
+
6. Run `make release`.
|
274 |
+
7. Go to [GitHub Releases](https://github.com/gitpython-developers/GitPython/releases) and publish a new one with the recently pushed tag. Generate the changelog.
|
275 |
+
|
276 |
+
### Projects using GitPython
|
277 |
+
|
278 |
+
- [PyDriller](https://github.com/ishepard/pydriller)
|
279 |
+
- [Kivy Designer](https://github.com/kivy/kivy-designer)
|
280 |
+
- [Prowl](https://github.com/nettitude/Prowl)
|
281 |
+
- [Python Taint](https://github.com/python-security/pyt)
|
282 |
+
- [Buster](https://github.com/axitkhurana/buster)
|
283 |
+
- [git-ftp](https://github.com/ezyang/git-ftp)
|
284 |
+
- [Git-Pandas](https://github.com/wdm0006/git-pandas)
|
285 |
+
- [PyGitUp](https://github.com/msiemens/PyGitUp)
|
286 |
+
- [PyJFuzz](https://github.com/mseclab/PyJFuzz)
|
287 |
+
- [Loki](https://github.com/Neo23x0/Loki)
|
288 |
+
- [Omniwallet](https://github.com/OmniLayer/omniwallet)
|
289 |
+
- [GitViper](https://github.com/BeayemX/GitViper)
|
290 |
+
- [Git Gud](https://github.com/bthayer2365/git-gud)
|
291 |
+
|
292 |
+
### LICENSE
|
293 |
+
|
294 |
+
[3-Clause BSD License](https://opensource.org/license/bsd-3-clause/), also known as the New BSD License. See the [LICENSE file][license].
|
295 |
+
|
296 |
+
[contributing]: https://github.com/gitpython-developers/GitPython/blob/main/CONTRIBUTING.md
|
297 |
+
[license]: https://github.com/gitpython-developers/GitPython/blob/main/LICENSE
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/RECORD
ADDED
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
GitPython-3.1.43.dist-info/AUTHORS,sha256=h1TlPKfp05GA1eKQ15Yl4biR0C0FgivuGSeRA6Q1dz0,2286
|
2 |
+
GitPython-3.1.43.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
3 |
+
GitPython-3.1.43.dist-info/LICENSE,sha256=hvyUwyGpr7wRUUcTURuv3tIl8lEA3MD3NQ6CvCMbi-s,1503
|
4 |
+
GitPython-3.1.43.dist-info/METADATA,sha256=sAh3r1BMVw5_olGgDmpMS69zBpVr7UEOeRivNHKznfU,13376
|
5 |
+
GitPython-3.1.43.dist-info/RECORD,,
|
6 |
+
GitPython-3.1.43.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
|
7 |
+
GitPython-3.1.43.dist-info/top_level.txt,sha256=0hzDuIp8obv624V3GmbqsagBWkk8ohtGU-Bc1PmTT0o,4
|
8 |
+
git/__init__.py,sha256=w6fnS0QmwTfEFUSL6rfnpP0lUId2goSguZFOvVX3N3U,8899
|
9 |
+
git/__pycache__/__init__.cpython-311.pyc,,
|
10 |
+
git/__pycache__/cmd.cpython-311.pyc,,
|
11 |
+
git/__pycache__/compat.cpython-311.pyc,,
|
12 |
+
git/__pycache__/config.cpython-311.pyc,,
|
13 |
+
git/__pycache__/db.cpython-311.pyc,,
|
14 |
+
git/__pycache__/diff.cpython-311.pyc,,
|
15 |
+
git/__pycache__/exc.cpython-311.pyc,,
|
16 |
+
git/__pycache__/remote.cpython-311.pyc,,
|
17 |
+
git/__pycache__/types.cpython-311.pyc,,
|
18 |
+
git/__pycache__/util.cpython-311.pyc,,
|
19 |
+
git/cmd.py,sha256=qd-gIHSk4mfsYjd9YA08cPyO8TMxaibTXAbFnHK71uc,67659
|
20 |
+
git/compat.py,sha256=y1E6y6O2q5r8clSlr8ZNmuIWG9nmHuehQEsVsmBffs8,4526
|
21 |
+
git/config.py,sha256=Ald8Xc-G9Shcgx3QCISyXTkL4a6nbc3qll-xUw4YdyY,34924
|
22 |
+
git/db.py,sha256=vIW9uWSbqu99zbuU2ZDmOhVOv1UPTmxrnqiCtRHCfjE,2368
|
23 |
+
git/diff.py,sha256=IE5aeHL7aP9yxBluYj06IX8nZjoJ_TOM3gG31-Evf_8,27058
|
24 |
+
git/exc.py,sha256=Gc7g1pHpn8OmTse30NHmJVsBJ2CYH8LxaR8y8UA3lIM,7119
|
25 |
+
git/index/__init__.py,sha256=i-Nqb8Lufp9aFbmxpQBORmmQnjEVVM1Pn58fsQkyGgQ,406
|
26 |
+
git/index/__pycache__/__init__.cpython-311.pyc,,
|
27 |
+
git/index/__pycache__/base.cpython-311.pyc,,
|
28 |
+
git/index/__pycache__/fun.cpython-311.pyc,,
|
29 |
+
git/index/__pycache__/typ.cpython-311.pyc,,
|
30 |
+
git/index/__pycache__/util.cpython-311.pyc,,
|
31 |
+
git/index/base.py,sha256=A4q4cN_Ifxi8CsAR-7h4KsQ2d3JazBNFZ1ltbAKttgs,60734
|
32 |
+
git/index/fun.py,sha256=37cA3DBC9vpAnSVu5TGA072SnoF5XZOkOukExwlejHs,16736
|
33 |
+
git/index/typ.py,sha256=uuKNwitUw83FhVaLSwo4pY7PHDQudtZTLJrLGym4jcI,6570
|
34 |
+
git/index/util.py,sha256=fULi7GPG-MvprKrRCD5c15GNdzku_1E38We0d97WB3A,3659
|
35 |
+
git/objects/__init__.py,sha256=O6ZL_olX7e5-8iIbKviRPkVSJxN37WA-EC0q9d48U5Y,637
|
36 |
+
git/objects/__pycache__/__init__.cpython-311.pyc,,
|
37 |
+
git/objects/__pycache__/base.cpython-311.pyc,,
|
38 |
+
git/objects/__pycache__/blob.cpython-311.pyc,,
|
39 |
+
git/objects/__pycache__/commit.cpython-311.pyc,,
|
40 |
+
git/objects/__pycache__/fun.cpython-311.pyc,,
|
41 |
+
git/objects/__pycache__/tag.cpython-311.pyc,,
|
42 |
+
git/objects/__pycache__/tree.cpython-311.pyc,,
|
43 |
+
git/objects/__pycache__/util.cpython-311.pyc,,
|
44 |
+
git/objects/base.py,sha256=0dqNkSRVH0mk0-7ZKIkGBK7iNYrzLTVxwQFUd6CagsE,10277
|
45 |
+
git/objects/blob.py,sha256=zwwq0KfOMYeP5J2tW5CQatoLyeqFRlfkxP1Vwx1h07s,1215
|
46 |
+
git/objects/commit.py,sha256=vLZNl1I9zp17Rpge7J66CvsryirEs90jyPTQzoP0JJs,30208
|
47 |
+
git/objects/fun.py,sha256=B4jCqhAjm6Hl79GK58FPzW1H9K6Wc7Tx0rssyWmAcEE,8935
|
48 |
+
git/objects/submodule/__init__.py,sha256=6xySp767LVz3UylWgUalntS_nGXRuVzXxDuFAv_Wc2c,303
|
49 |
+
git/objects/submodule/__pycache__/__init__.cpython-311.pyc,,
|
50 |
+
git/objects/submodule/__pycache__/base.cpython-311.pyc,,
|
51 |
+
git/objects/submodule/__pycache__/root.cpython-311.pyc,,
|
52 |
+
git/objects/submodule/__pycache__/util.cpython-311.pyc,,
|
53 |
+
git/objects/submodule/base.py,sha256=MQ-2xV8JznGwy2hLQv1aeQNgAkhBhgc5tdtClFL3DmE,63901
|
54 |
+
git/objects/submodule/root.py,sha256=5eTtYNHasqdPq6q0oDCPr7IaO6uAHL3b4DxMoiO2LhE,20246
|
55 |
+
git/objects/submodule/util.py,sha256=sQqAYaiSJdFkZa9NlAuK_wTsMNiS-kkQnQjvIoJtc_o,3509
|
56 |
+
git/objects/tag.py,sha256=gAx8i-DEwy_Z3R2zLkvetYRV8A56BCcTr3iLuTUTfEM,4467
|
57 |
+
git/objects/tree.py,sha256=jJH888SHiP4dGzE-ra1yenQOyya_0C_MkHr06c1gHpM,13849
|
58 |
+
git/objects/util.py,sha256=Ml2eqZPKO4y9Hc2vWbXJgpsK3nkN3KGMzbn8AlzLyYQ,23834
|
59 |
+
git/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
60 |
+
git/refs/__init__.py,sha256=DWlJNnsx-4jM_E-VycbP-FZUdn6iWhjnH_uZ_pZXBro,509
|
61 |
+
git/refs/__pycache__/__init__.cpython-311.pyc,,
|
62 |
+
git/refs/__pycache__/head.cpython-311.pyc,,
|
63 |
+
git/refs/__pycache__/log.cpython-311.pyc,,
|
64 |
+
git/refs/__pycache__/reference.cpython-311.pyc,,
|
65 |
+
git/refs/__pycache__/remote.cpython-311.pyc,,
|
66 |
+
git/refs/__pycache__/symbolic.cpython-311.pyc,,
|
67 |
+
git/refs/__pycache__/tag.cpython-311.pyc,,
|
68 |
+
git/refs/head.py,sha256=GAZpD5EfqSciDXPtgjHY8ZbBixKExJRhojUB-HrrJPg,10491
|
69 |
+
git/refs/log.py,sha256=kXiuAgTo1DIuM_BfbDUk9gQ0YO-mutIMVdHv1_ES90o,12493
|
70 |
+
git/refs/reference.py,sha256=l6mhF4YLSEwtjz6b9PpOQH-fkng7EYWMaJhkjn-2jXA,5630
|
71 |
+
git/refs/remote.py,sha256=WwqV9T7BbYf3F_WZNUQivu9xktIIKGklCjDpwQrhD-A,2806
|
72 |
+
git/refs/symbolic.py,sha256=c8zOwaqzcg-J-rGrpuWdvh8zwMvSUqAHghd4vJoYG_s,34552
|
73 |
+
git/refs/tag.py,sha256=kgzV2vhpL4FD2TqHb0BJuMRAHgAvJF-TcoyWlaB-djQ,5010
|
74 |
+
git/remote.py,sha256=IHQ3BvXgoIN1EvHlyH3vrSaQoDkLOE6nooSC0w183sU,46561
|
75 |
+
git/repo/__init__.py,sha256=CILSVH36fX_WxVFSjD9o1WF5LgsNedPiJvSngKZqfVU,210
|
76 |
+
git/repo/__pycache__/__init__.cpython-311.pyc,,
|
77 |
+
git/repo/__pycache__/base.cpython-311.pyc,,
|
78 |
+
git/repo/__pycache__/fun.cpython-311.pyc,,
|
79 |
+
git/repo/base.py,sha256=mitfJ8u99CsMpDd7_VRyx-SF8omu2tpf3lqzSaQkKoQ,59353
|
80 |
+
git/repo/fun.py,sha256=tEsClpmbOrKMSNIdncOB_6JdikrL1-AfkOFd7xMpD8k,13582
|
81 |
+
git/types.py,sha256=xCwpp2Y01lhS0MapHhj04m0P_x34kwSD1Gsou_ZPWj8,10251
|
82 |
+
git/util.py,sha256=1E883mnPAFLyFk7ivwnEremsp-uJOTc3ks_QypyLung,43651
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.43.0)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
llm/Lib/site-packages/GitPython-3.1.43.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
git
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/LICENSE.rst
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright 2007 Pallets
|
2 |
+
|
3 |
+
Redistribution and use in source and binary forms, with or without
|
4 |
+
modification, are permitted provided that the following conditions are
|
5 |
+
met:
|
6 |
+
|
7 |
+
1. Redistributions of source code must retain the above copyright
|
8 |
+
notice, this list of conditions and the following disclaimer.
|
9 |
+
|
10 |
+
2. Redistributions in binary form must reproduce the above copyright
|
11 |
+
notice, this list of conditions and the following disclaimer in the
|
12 |
+
documentation and/or other materials provided with the distribution.
|
13 |
+
|
14 |
+
3. Neither the name of the copyright holder nor the names of its
|
15 |
+
contributors may be used to endorse or promote products derived from
|
16 |
+
this software without specific prior written permission.
|
17 |
+
|
18 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
19 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
20 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
21 |
+
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
22 |
+
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
23 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
24 |
+
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
25 |
+
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
26 |
+
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
27 |
+
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
28 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/METADATA
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: Jinja2
|
3 |
+
Version: 3.1.3
|
4 |
+
Summary: A very fast and expressive template engine.
|
5 |
+
Home-page: https://palletsprojects.com/p/jinja/
|
6 |
+
Maintainer: Pallets
|
7 |
+
Maintainer-email: [email protected]
|
8 |
+
License: BSD-3-Clause
|
9 |
+
Project-URL: Donate, https://palletsprojects.com/donate
|
10 |
+
Project-URL: Documentation, https://jinja.palletsprojects.com/
|
11 |
+
Project-URL: Changes, https://jinja.palletsprojects.com/changes/
|
12 |
+
Project-URL: Source Code, https://github.com/pallets/jinja/
|
13 |
+
Project-URL: Issue Tracker, https://github.com/pallets/jinja/issues/
|
14 |
+
Project-URL: Chat, https://discord.gg/pallets
|
15 |
+
Classifier: Development Status :: 5 - Production/Stable
|
16 |
+
Classifier: Environment :: Web Environment
|
17 |
+
Classifier: Intended Audience :: Developers
|
18 |
+
Classifier: License :: OSI Approved :: BSD License
|
19 |
+
Classifier: Operating System :: OS Independent
|
20 |
+
Classifier: Programming Language :: Python
|
21 |
+
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
|
22 |
+
Classifier: Topic :: Text Processing :: Markup :: HTML
|
23 |
+
Requires-Python: >=3.7
|
24 |
+
Description-Content-Type: text/x-rst
|
25 |
+
License-File: LICENSE.rst
|
26 |
+
Requires-Dist: MarkupSafe >=2.0
|
27 |
+
Provides-Extra: i18n
|
28 |
+
Requires-Dist: Babel >=2.7 ; extra == 'i18n'
|
29 |
+
|
30 |
+
Jinja
|
31 |
+
=====
|
32 |
+
|
33 |
+
Jinja is a fast, expressive, extensible templating engine. Special
|
34 |
+
placeholders in the template allow writing code similar to Python
|
35 |
+
syntax. Then the template is passed data to render the final document.
|
36 |
+
|
37 |
+
It includes:
|
38 |
+
|
39 |
+
- Template inheritance and inclusion.
|
40 |
+
- Define and import macros within templates.
|
41 |
+
- HTML templates can use autoescaping to prevent XSS from untrusted
|
42 |
+
user input.
|
43 |
+
- A sandboxed environment can safely render untrusted templates.
|
44 |
+
- AsyncIO support for generating templates and calling async
|
45 |
+
functions.
|
46 |
+
- I18N support with Babel.
|
47 |
+
- Templates are compiled to optimized Python code just-in-time and
|
48 |
+
cached, or can be compiled ahead-of-time.
|
49 |
+
- Exceptions point to the correct line in templates to make debugging
|
50 |
+
easier.
|
51 |
+
- Extensible filters, tests, functions, and even syntax.
|
52 |
+
|
53 |
+
Jinja's philosophy is that while application logic belongs in Python if
|
54 |
+
possible, it shouldn't make the template designer's job difficult by
|
55 |
+
restricting functionality too much.
|
56 |
+
|
57 |
+
|
58 |
+
Installing
|
59 |
+
----------
|
60 |
+
|
61 |
+
Install and update using `pip`_:
|
62 |
+
|
63 |
+
.. code-block:: text
|
64 |
+
|
65 |
+
$ pip install -U Jinja2
|
66 |
+
|
67 |
+
.. _pip: https://pip.pypa.io/en/stable/getting-started/
|
68 |
+
|
69 |
+
|
70 |
+
In A Nutshell
|
71 |
+
-------------
|
72 |
+
|
73 |
+
.. code-block:: jinja
|
74 |
+
|
75 |
+
{% extends "base.html" %}
|
76 |
+
{% block title %}Members{% endblock %}
|
77 |
+
{% block content %}
|
78 |
+
<ul>
|
79 |
+
{% for user in users %}
|
80 |
+
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
|
81 |
+
{% endfor %}
|
82 |
+
</ul>
|
83 |
+
{% endblock %}
|
84 |
+
|
85 |
+
|
86 |
+
Donate
|
87 |
+
------
|
88 |
+
|
89 |
+
The Pallets organization develops and supports Jinja and other popular
|
90 |
+
packages. In order to grow the community of contributors and users, and
|
91 |
+
allow the maintainers to devote more time to the projects, `please
|
92 |
+
donate today`_.
|
93 |
+
|
94 |
+
.. _please donate today: https://palletsprojects.com/donate
|
95 |
+
|
96 |
+
|
97 |
+
Links
|
98 |
+
-----
|
99 |
+
|
100 |
+
- Documentation: https://jinja.palletsprojects.com/
|
101 |
+
- Changes: https://jinja.palletsprojects.com/changes/
|
102 |
+
- PyPI Releases: https://pypi.org/project/Jinja2/
|
103 |
+
- Source Code: https://github.com/pallets/jinja/
|
104 |
+
- Issue Tracker: https://github.com/pallets/jinja/issues/
|
105 |
+
- Chat: https://discord.gg/pallets
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/RECORD
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Jinja2-3.1.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
2 |
+
Jinja2-3.1.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
|
3 |
+
Jinja2-3.1.3.dist-info/METADATA,sha256=0cLNbRCI91jytc7Bzv3XAQfZzFDF2gxkJuH46eF5vew,3301
|
4 |
+
Jinja2-3.1.3.dist-info/RECORD,,
|
5 |
+
Jinja2-3.1.3.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
|
6 |
+
Jinja2-3.1.3.dist-info/entry_points.txt,sha256=zRd62fbqIyfUpsRtU7EVIFyiu1tPwfgO7EvPErnxgTE,59
|
7 |
+
Jinja2-3.1.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
|
8 |
+
jinja2/__init__.py,sha256=NTBwMwsECrdHmxeXF7seusHLzrh6Ldn1A9qhS5cDuf0,1927
|
9 |
+
jinja2/__pycache__/__init__.cpython-311.pyc,,
|
10 |
+
jinja2/__pycache__/_identifier.cpython-311.pyc,,
|
11 |
+
jinja2/__pycache__/async_utils.cpython-311.pyc,,
|
12 |
+
jinja2/__pycache__/bccache.cpython-311.pyc,,
|
13 |
+
jinja2/__pycache__/compiler.cpython-311.pyc,,
|
14 |
+
jinja2/__pycache__/constants.cpython-311.pyc,,
|
15 |
+
jinja2/__pycache__/debug.cpython-311.pyc,,
|
16 |
+
jinja2/__pycache__/defaults.cpython-311.pyc,,
|
17 |
+
jinja2/__pycache__/environment.cpython-311.pyc,,
|
18 |
+
jinja2/__pycache__/exceptions.cpython-311.pyc,,
|
19 |
+
jinja2/__pycache__/ext.cpython-311.pyc,,
|
20 |
+
jinja2/__pycache__/filters.cpython-311.pyc,,
|
21 |
+
jinja2/__pycache__/idtracking.cpython-311.pyc,,
|
22 |
+
jinja2/__pycache__/lexer.cpython-311.pyc,,
|
23 |
+
jinja2/__pycache__/loaders.cpython-311.pyc,,
|
24 |
+
jinja2/__pycache__/meta.cpython-311.pyc,,
|
25 |
+
jinja2/__pycache__/nativetypes.cpython-311.pyc,,
|
26 |
+
jinja2/__pycache__/nodes.cpython-311.pyc,,
|
27 |
+
jinja2/__pycache__/optimizer.cpython-311.pyc,,
|
28 |
+
jinja2/__pycache__/parser.cpython-311.pyc,,
|
29 |
+
jinja2/__pycache__/runtime.cpython-311.pyc,,
|
30 |
+
jinja2/__pycache__/sandbox.cpython-311.pyc,,
|
31 |
+
jinja2/__pycache__/tests.cpython-311.pyc,,
|
32 |
+
jinja2/__pycache__/utils.cpython-311.pyc,,
|
33 |
+
jinja2/__pycache__/visitor.cpython-311.pyc,,
|
34 |
+
jinja2/_identifier.py,sha256=_zYctNKzRqlk_murTNlzrju1FFJL7Va_Ijqqd7ii2lU,1958
|
35 |
+
jinja2/async_utils.py,sha256=dFcmh6lMNfbh7eLKrBio8JqAKLHdZbpCuurFN4OERtY,2447
|
36 |
+
jinja2/bccache.py,sha256=mhz5xtLxCcHRAa56azOhphIAe19u1we0ojifNMClDio,14061
|
37 |
+
jinja2/compiler.py,sha256=PJzYdRLStlEOqmnQs1YxlizPrJoj3jTZuUleREn6AIQ,72199
|
38 |
+
jinja2/constants.py,sha256=GMoFydBF_kdpaRKPoM5cl5MviquVRLVyZtfp5-16jg0,1433
|
39 |
+
jinja2/debug.py,sha256=iWJ432RadxJNnaMOPrjIDInz50UEgni3_HKuFXi2vuQ,6299
|
40 |
+
jinja2/defaults.py,sha256=boBcSw78h-lp20YbaXSJsqkAI2uN_mD_TtCydpeq5wU,1267
|
41 |
+
jinja2/environment.py,sha256=0qldX3VQKZcm6lgn7zHz94oRFow7YPYERiqkquomNjU,61253
|
42 |
+
jinja2/exceptions.py,sha256=ioHeHrWwCWNaXX1inHmHVblvc4haO7AXsjCp3GfWvx0,5071
|
43 |
+
jinja2/ext.py,sha256=5fnMpllaXkfm2P_93RIvi-OnK7Tk8mCW8Du-GcD12Hc,31844
|
44 |
+
jinja2/filters.py,sha256=vYjKb2zaPShvYtn_LpSmqfS8SScbrA_KOanNibsMDIE,53862
|
45 |
+
jinja2/idtracking.py,sha256=GfNmadir4oDALVxzn3DL9YInhJDr69ebXeA2ygfuCGA,10704
|
46 |
+
jinja2/lexer.py,sha256=DW2nX9zk-6MWp65YR2bqqj0xqCvLtD-u9NWT8AnFRxQ,29726
|
47 |
+
jinja2/loaders.py,sha256=ayAwxfrA1SAffQta0nwSDm3TDT4KYiIGN_D9Z45B310,23085
|
48 |
+
jinja2/meta.py,sha256=GNPEvifmSaU3CMxlbheBOZjeZ277HThOPUTf1RkppKQ,4396
|
49 |
+
jinja2/nativetypes.py,sha256=7GIGALVJgdyL80oZJdQUaUfwSt5q2lSSZbXt0dNf_M4,4210
|
50 |
+
jinja2/nodes.py,sha256=i34GPRAZexXMT6bwuf5SEyvdmS-bRCy9KMjwN5O6pjk,34550
|
51 |
+
jinja2/optimizer.py,sha256=tHkMwXxfZkbfA1KmLcqmBMSaz7RLIvvItrJcPoXTyD8,1650
|
52 |
+
jinja2/parser.py,sha256=Y199wPL-G67gJoi5G_5sHuu9uEP1PJkjjLEW_xTH8-k,39736
|
53 |
+
jinja2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
54 |
+
jinja2/runtime.py,sha256=_6LkKIWFJjQdqlrgA3K39zBFQ-7Orm3wGDm96RwxQoE,33406
|
55 |
+
jinja2/sandbox.py,sha256=Y0xZeXQnH6EX5VjaV2YixESxoepnRbW_3UeQosaBU3M,14584
|
56 |
+
jinja2/tests.py,sha256=Am5Z6Lmfr2XaH_npIfJJ8MdXtWsbLjMULZJulTAj30E,5905
|
57 |
+
jinja2/utils.py,sha256=IMwRIcN1SsTw2-jdQtlH2KzNABsXZBW_-tnFXafQBvY,23933
|
58 |
+
jinja2/visitor.py,sha256=MH14C6yq24G_KVtWzjwaI7Wg14PCJIYlWW1kpkxYak0,3568
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.42.0)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
[babel.extractors]
|
2 |
+
jinja2 = jinja2.ext:babel_extract[i18n]
|
llm/Lib/site-packages/Jinja2-3.1.3.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
jinja2
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/LICENSE
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Apache License
|
2 |
+
Version 2.0, January 2004
|
3 |
+
http://www.apache.org/licenses/
|
4 |
+
|
5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
6 |
+
|
7 |
+
1. Definitions.
|
8 |
+
|
9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
11 |
+
|
12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
13 |
+
the copyright owner that is granting the License.
|
14 |
+
|
15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
16 |
+
other entities that control, are controlled by, or are under common
|
17 |
+
control with that entity. For the purposes of this definition,
|
18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
19 |
+
direction or management of such entity, whether by contract or
|
20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
22 |
+
|
23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
24 |
+
exercising permissions granted by this License.
|
25 |
+
|
26 |
+
"Source" form shall mean the preferred form for making modifications,
|
27 |
+
including but not limited to software source code, documentation
|
28 |
+
source, and configuration files.
|
29 |
+
|
30 |
+
"Object" form shall mean any form resulting from mechanical
|
31 |
+
transformation or translation of a Source form, including but
|
32 |
+
not limited to compiled object code, generated documentation,
|
33 |
+
and conversions to other media types.
|
34 |
+
|
35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
36 |
+
Object form, made available under the License, as indicated by a
|
37 |
+
copyright notice that is included in or attached to the work
|
38 |
+
(an example is provided in the Appendix below).
|
39 |
+
|
40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
41 |
+
form, that is based on (or derived from) the Work and for which the
|
42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
44 |
+
of this License, Derivative Works shall not include works that remain
|
45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
46 |
+
the Work and Derivative Works thereof.
|
47 |
+
|
48 |
+
"Contribution" shall mean any work of authorship, including
|
49 |
+
the original version of the Work and any modifications or additions
|
50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
54 |
+
means any form of electronic, verbal, or written communication sent
|
55 |
+
to the Licensor or its representatives, including but not limited to
|
56 |
+
communication on electronic mailing lists, source code control systems,
|
57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
59 |
+
excluding communication that is conspicuously marked or otherwise
|
60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
61 |
+
|
62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
64 |
+
subsequently incorporated within the Work.
|
65 |
+
|
66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
71 |
+
Work and such Derivative Works in Source or Object form.
|
72 |
+
|
73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
76 |
+
(except as stated in this section) patent license to make, have made,
|
77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
78 |
+
where such license applies only to those patent claims licensable
|
79 |
+
by such Contributor that are necessarily infringed by their
|
80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
82 |
+
institute patent litigation against any entity (including a
|
83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
84 |
+
or a Contribution incorporated within the Work constitutes direct
|
85 |
+
or contributory patent infringement, then any patent licenses
|
86 |
+
granted to You under this License for that Work shall terminate
|
87 |
+
as of the date such litigation is filed.
|
88 |
+
|
89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
90 |
+
Work or Derivative Works thereof in any medium, with or without
|
91 |
+
modifications, and in Source or Object form, provided that You
|
92 |
+
meet the following conditions:
|
93 |
+
|
94 |
+
(a) You must give any other recipients of the Work or
|
95 |
+
Derivative Works a copy of this License; and
|
96 |
+
|
97 |
+
(b) You must cause any modified files to carry prominent notices
|
98 |
+
stating that You changed the files; and
|
99 |
+
|
100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
101 |
+
that You distribute, all copyright, patent, trademark, and
|
102 |
+
attribution notices from the Source form of the Work,
|
103 |
+
excluding those notices that do not pertain to any part of
|
104 |
+
the Derivative Works; and
|
105 |
+
|
106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
107 |
+
distribution, then any Derivative Works that You distribute must
|
108 |
+
include a readable copy of the attribution notices contained
|
109 |
+
within such NOTICE file, excluding those notices that do not
|
110 |
+
pertain to any part of the Derivative Works, in at least one
|
111 |
+
of the following places: within a NOTICE text file distributed
|
112 |
+
as part of the Derivative Works; within the Source form or
|
113 |
+
documentation, if provided along with the Derivative Works; or,
|
114 |
+
within a display generated by the Derivative Works, if and
|
115 |
+
wherever such third-party notices normally appear. The contents
|
116 |
+
of the NOTICE file are for informational purposes only and
|
117 |
+
do not modify the License. You may add Your own attribution
|
118 |
+
notices within Derivative Works that You distribute, alongside
|
119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
120 |
+
that such additional attribution notices cannot be construed
|
121 |
+
as modifying the License.
|
122 |
+
|
123 |
+
You may add Your own copyright statement to Your modifications and
|
124 |
+
may provide additional or different license terms and conditions
|
125 |
+
for use, reproduction, or distribution of Your modifications, or
|
126 |
+
for any such Derivative Works as a whole, provided Your use,
|
127 |
+
reproduction, and distribution of the Work otherwise complies with
|
128 |
+
the conditions stated in this License.
|
129 |
+
|
130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
132 |
+
by You to the Licensor shall be under the terms and conditions of
|
133 |
+
this License, without any additional terms or conditions.
|
134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
135 |
+
the terms of any separate license agreement you may have executed
|
136 |
+
with Licensor regarding such Contributions.
|
137 |
+
|
138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
140 |
+
except as required for reasonable and customary use in describing the
|
141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
142 |
+
|
143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
144 |
+
agreed to in writing, Licensor provides the Work (and each
|
145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
147 |
+
implied, including, without limitation, any warranties or conditions
|
148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
150 |
+
appropriateness of using or redistributing the Work and assume any
|
151 |
+
risks associated with Your exercise of permissions under this License.
|
152 |
+
|
153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
154 |
+
whether in tort (including negligence), contract, or otherwise,
|
155 |
+
unless required by applicable law (such as deliberate and grossly
|
156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
157 |
+
liable to You for damages, including any direct, indirect, special,
|
158 |
+
incidental, or consequential damages of any character arising as a
|
159 |
+
result of this License or out of the use or inability to use the
|
160 |
+
Work (including but not limited to damages for loss of goodwill,
|
161 |
+
work stoppage, computer failure or malfunction, or any and all
|
162 |
+
other commercial damages or losses), even if such Contributor
|
163 |
+
has been advised of the possibility of such damages.
|
164 |
+
|
165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
168 |
+
or other liability obligations and/or rights consistent with this
|
169 |
+
License. However, in accepting such obligations, You may act only
|
170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
171 |
+
of any other Contributor, and only if You agree to indemnify,
|
172 |
+
defend, and hold each Contributor harmless for any liability
|
173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
174 |
+
of your accepting any such warranty or additional liability.
|
175 |
+
|
176 |
+
END OF TERMS AND CONDITIONS
|
177 |
+
|
178 |
+
APPENDIX: How to apply the Apache License to your work.
|
179 |
+
|
180 |
+
To apply the Apache License to your work, attach the following
|
181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
182 |
+
replaced with your own identifying information. (Don't include
|
183 |
+
the brackets!) The text should be enclosed in the appropriate
|
184 |
+
comment syntax for the file format. We also recommend that a
|
185 |
+
file or class name and description of purpose be included on the
|
186 |
+
same "printed page" as the copyright notice for easier
|
187 |
+
identification within third-party archives.
|
188 |
+
|
189 |
+
Copyright [yyyy] [name of copyright owner]
|
190 |
+
|
191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
192 |
+
you may not use this file except in compliance with the License.
|
193 |
+
You may obtain a copy of the License at
|
194 |
+
|
195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
196 |
+
|
197 |
+
Unless required by applicable law or agreed to in writing, software
|
198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
200 |
+
See the License for the specific language governing permissions and
|
201 |
+
limitations under the License.
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/METADATA
ADDED
@@ -0,0 +1,378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: accelerate
|
3 |
+
Version: 0.29.3
|
4 |
+
Summary: Accelerate
|
5 |
+
Home-page: https://github.com/huggingface/accelerate
|
6 |
+
Author: The HuggingFace team
|
7 |
+
Author-email: [email protected]
|
8 |
+
License: Apache
|
9 |
+
Keywords: deep learning
|
10 |
+
Classifier: Development Status :: 5 - Production/Stable
|
11 |
+
Classifier: Intended Audience :: Developers
|
12 |
+
Classifier: Intended Audience :: Education
|
13 |
+
Classifier: Intended Audience :: Science/Research
|
14 |
+
Classifier: License :: OSI Approved :: Apache Software License
|
15 |
+
Classifier: Operating System :: OS Independent
|
16 |
+
Classifier: Programming Language :: Python :: 3
|
17 |
+
Classifier: Programming Language :: Python :: 3.8
|
18 |
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
19 |
+
Requires-Python: >=3.8.0
|
20 |
+
Description-Content-Type: text/markdown
|
21 |
+
License-File: LICENSE
|
22 |
+
Requires-Dist: numpy (>=1.17)
|
23 |
+
Requires-Dist: packaging (>=20.0)
|
24 |
+
Requires-Dist: psutil
|
25 |
+
Requires-Dist: pyyaml
|
26 |
+
Requires-Dist: torch (>=1.10.0)
|
27 |
+
Requires-Dist: huggingface-hub
|
28 |
+
Requires-Dist: safetensors (>=0.3.1)
|
29 |
+
Provides-Extra: dev
|
30 |
+
Requires-Dist: black (~=23.1) ; extra == 'dev'
|
31 |
+
Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'dev'
|
32 |
+
Requires-Dist: ruff (~=0.2.1) ; extra == 'dev'
|
33 |
+
Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'dev'
|
34 |
+
Requires-Dist: pytest-xdist ; extra == 'dev'
|
35 |
+
Requires-Dist: pytest-subtests ; extra == 'dev'
|
36 |
+
Requires-Dist: parameterized ; extra == 'dev'
|
37 |
+
Requires-Dist: datasets ; extra == 'dev'
|
38 |
+
Requires-Dist: evaluate ; extra == 'dev'
|
39 |
+
Requires-Dist: torchpippy (>=0.2.0) ; extra == 'dev'
|
40 |
+
Requires-Dist: transformers ; extra == 'dev'
|
41 |
+
Requires-Dist: scipy ; extra == 'dev'
|
42 |
+
Requires-Dist: scikit-learn ; extra == 'dev'
|
43 |
+
Requires-Dist: deepspeed ; extra == 'dev'
|
44 |
+
Requires-Dist: tqdm ; extra == 'dev'
|
45 |
+
Requires-Dist: bitsandbytes ; extra == 'dev'
|
46 |
+
Requires-Dist: timm ; extra == 'dev'
|
47 |
+
Requires-Dist: rich ; extra == 'dev'
|
48 |
+
Provides-Extra: docs
|
49 |
+
Provides-Extra: quality
|
50 |
+
Requires-Dist: black (~=23.1) ; extra == 'quality'
|
51 |
+
Requires-Dist: hf-doc-builder (>=0.3.0) ; extra == 'quality'
|
52 |
+
Requires-Dist: ruff (~=0.2.1) ; extra == 'quality'
|
53 |
+
Provides-Extra: rich
|
54 |
+
Requires-Dist: rich ; extra == 'rich'
|
55 |
+
Provides-Extra: sagemaker
|
56 |
+
Requires-Dist: sagemaker ; extra == 'sagemaker'
|
57 |
+
Provides-Extra: test_dev
|
58 |
+
Requires-Dist: datasets ; extra == 'test_dev'
|
59 |
+
Requires-Dist: evaluate ; extra == 'test_dev'
|
60 |
+
Requires-Dist: torchpippy (>=0.2.0) ; extra == 'test_dev'
|
61 |
+
Requires-Dist: transformers ; extra == 'test_dev'
|
62 |
+
Requires-Dist: scipy ; extra == 'test_dev'
|
63 |
+
Requires-Dist: scikit-learn ; extra == 'test_dev'
|
64 |
+
Requires-Dist: deepspeed ; extra == 'test_dev'
|
65 |
+
Requires-Dist: tqdm ; extra == 'test_dev'
|
66 |
+
Requires-Dist: bitsandbytes ; extra == 'test_dev'
|
67 |
+
Requires-Dist: timm ; extra == 'test_dev'
|
68 |
+
Provides-Extra: test_prod
|
69 |
+
Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'test_prod'
|
70 |
+
Requires-Dist: pytest-xdist ; extra == 'test_prod'
|
71 |
+
Requires-Dist: pytest-subtests ; extra == 'test_prod'
|
72 |
+
Requires-Dist: parameterized ; extra == 'test_prod'
|
73 |
+
Provides-Extra: test_trackers
|
74 |
+
Requires-Dist: wandb ; extra == 'test_trackers'
|
75 |
+
Requires-Dist: comet-ml ; extra == 'test_trackers'
|
76 |
+
Requires-Dist: tensorboard ; extra == 'test_trackers'
|
77 |
+
Requires-Dist: dvclive ; extra == 'test_trackers'
|
78 |
+
Provides-Extra: testing
|
79 |
+
Requires-Dist: pytest (<=8.0.0,>=7.2.0) ; extra == 'testing'
|
80 |
+
Requires-Dist: pytest-xdist ; extra == 'testing'
|
81 |
+
Requires-Dist: pytest-subtests ; extra == 'testing'
|
82 |
+
Requires-Dist: parameterized ; extra == 'testing'
|
83 |
+
Requires-Dist: datasets ; extra == 'testing'
|
84 |
+
Requires-Dist: evaluate ; extra == 'testing'
|
85 |
+
Requires-Dist: torchpippy (>=0.2.0) ; extra == 'testing'
|
86 |
+
Requires-Dist: transformers ; extra == 'testing'
|
87 |
+
Requires-Dist: scipy ; extra == 'testing'
|
88 |
+
Requires-Dist: scikit-learn ; extra == 'testing'
|
89 |
+
Requires-Dist: deepspeed ; extra == 'testing'
|
90 |
+
Requires-Dist: tqdm ; extra == 'testing'
|
91 |
+
Requires-Dist: bitsandbytes ; extra == 'testing'
|
92 |
+
Requires-Dist: timm ; extra == 'testing'
|
93 |
+
|
94 |
+
<!---
|
95 |
+
Copyright 2021 The HuggingFace Team. All rights reserved.
|
96 |
+
|
97 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
98 |
+
you may not use this file except in compliance with the License.
|
99 |
+
You may obtain a copy of the License at
|
100 |
+
|
101 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
102 |
+
|
103 |
+
Unless required by applicable law or agreed to in writing, software
|
104 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
105 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
106 |
+
See the License for the specific language governing permissions and
|
107 |
+
limitations under the License.
|
108 |
+
-->
|
109 |
+
|
110 |
+
<p align="center">
|
111 |
+
<br>
|
112 |
+
<img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/accelerate_logo.png" width="400"/>
|
113 |
+
<br>
|
114 |
+
<p>
|
115 |
+
|
116 |
+
<p align="center">
|
117 |
+
<!-- Uncomment when CircleCI is set up
|
118 |
+
<a href="https://circleci.com/gh/huggingface/accelerate">
|
119 |
+
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
|
120 |
+
</a>
|
121 |
+
-->
|
122 |
+
<a href="https://github.com/huggingface/accelerate/blob/main/LICENSE">
|
123 |
+
<img alt="License" src="https://img.shields.io/github/license/huggingface/accelerate.svg?color=blue">
|
124 |
+
</a>
|
125 |
+
<a href="https://huggingface.co/docs/accelerate/index.html">
|
126 |
+
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/accelerate/index.html.svg?down_color=red&down_message=offline&up_message=online">
|
127 |
+
</a>
|
128 |
+
<a href="https://github.com/huggingface/accelerate/releases">
|
129 |
+
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/accelerate.svg">
|
130 |
+
</a>
|
131 |
+
<a href="https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md">
|
132 |
+
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
133 |
+
</a>
|
134 |
+
</p>
|
135 |
+
|
136 |
+
<h3 align="center">
|
137 |
+
<p>Run your *raw* PyTorch training script on any kind of device
|
138 |
+
</h3>
|
139 |
+
|
140 |
+
<h3 align="center">
|
141 |
+
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/accelerate/main/docs/source/imgs/course_banner.png"></a>
|
142 |
+
</h3>
|
143 |
+
|
144 |
+
## Easy to integrate
|
145 |
+
|
146 |
+
🤗 Accelerate was created for PyTorch users who like to write the training loop of PyTorch models but are reluctant to write and maintain the boilerplate code needed to use multi-GPUs/TPU/fp16.
|
147 |
+
|
148 |
+
🤗 Accelerate abstracts exactly and only the boilerplate code related to multi-GPUs/TPU/fp16 and leaves the rest of your code unchanged.
|
149 |
+
|
150 |
+
Here is an example:
|
151 |
+
|
152 |
+
```diff
|
153 |
+
import torch
|
154 |
+
import torch.nn.functional as F
|
155 |
+
from datasets import load_dataset
|
156 |
+
+ from accelerate import Accelerator
|
157 |
+
|
158 |
+
+ accelerator = Accelerator()
|
159 |
+
- device = 'cpu'
|
160 |
+
+ device = accelerator.device
|
161 |
+
|
162 |
+
model = torch.nn.Transformer().to(device)
|
163 |
+
optimizer = torch.optim.Adam(model.parameters())
|
164 |
+
|
165 |
+
dataset = load_dataset('my_dataset')
|
166 |
+
data = torch.utils.data.DataLoader(dataset, shuffle=True)
|
167 |
+
|
168 |
+
+ model, optimizer, data = accelerator.prepare(model, optimizer, data)
|
169 |
+
|
170 |
+
model.train()
|
171 |
+
for epoch in range(10):
|
172 |
+
for source, targets in data:
|
173 |
+
source = source.to(device)
|
174 |
+
targets = targets.to(device)
|
175 |
+
|
176 |
+
optimizer.zero_grad()
|
177 |
+
|
178 |
+
output = model(source)
|
179 |
+
loss = F.cross_entropy(output, targets)
|
180 |
+
|
181 |
+
- loss.backward()
|
182 |
+
+ accelerator.backward(loss)
|
183 |
+
|
184 |
+
optimizer.step()
|
185 |
+
```
|
186 |
+
|
187 |
+
As you can see in this example, by adding 5-lines to any standard PyTorch training script you can now run on any kind of single or distributed node setting (single CPU, single GPU, multi-GPUs and TPUs) as well as with or without mixed precision (fp8, fp16, bf16).
|
188 |
+
|
189 |
+
In particular, the same code can then be run without modification on your local machine for debugging or your training environment.
|
190 |
+
|
191 |
+
🤗 Accelerate even handles the device placement for you (which requires a few more changes to your code, but is safer in general), so you can even simplify your training loop further:
|
192 |
+
|
193 |
+
```diff
|
194 |
+
import torch
|
195 |
+
import torch.nn.functional as F
|
196 |
+
from datasets import load_dataset
|
197 |
+
+ from accelerate import Accelerator
|
198 |
+
|
199 |
+
- device = 'cpu'
|
200 |
+
+ accelerator = Accelerator()
|
201 |
+
|
202 |
+
- model = torch.nn.Transformer().to(device)
|
203 |
+
+ model = torch.nn.Transformer()
|
204 |
+
optimizer = torch.optim.Adam(model.parameters())
|
205 |
+
|
206 |
+
dataset = load_dataset('my_dataset')
|
207 |
+
data = torch.utils.data.DataLoader(dataset, shuffle=True)
|
208 |
+
|
209 |
+
+ model, optimizer, data = accelerator.prepare(model, optimizer, data)
|
210 |
+
|
211 |
+
model.train()
|
212 |
+
for epoch in range(10):
|
213 |
+
for source, targets in data:
|
214 |
+
- source = source.to(device)
|
215 |
+
- targets = targets.to(device)
|
216 |
+
|
217 |
+
optimizer.zero_grad()
|
218 |
+
|
219 |
+
output = model(source)
|
220 |
+
loss = F.cross_entropy(output, targets)
|
221 |
+
|
222 |
+
- loss.backward()
|
223 |
+
+ accelerator.backward(loss)
|
224 |
+
|
225 |
+
optimizer.step()
|
226 |
+
```
|
227 |
+
|
228 |
+
Want to learn more? Check out the [documentation](https://huggingface.co/docs/accelerate) or have a look at our [examples](https://github.com/huggingface/accelerate/tree/main/examples).
|
229 |
+
|
230 |
+
## Launching script
|
231 |
+
|
232 |
+
🤗 Accelerate also provides an optional CLI tool that allows you to quickly configure and test your training environment before launching the scripts. No need to remember how to use `torch.distributed.run` or to write a specific launcher for TPU training!
|
233 |
+
On your machine(s) just run:
|
234 |
+
|
235 |
+
```bash
|
236 |
+
accelerate config
|
237 |
+
```
|
238 |
+
|
239 |
+
and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing
|
240 |
+
|
241 |
+
```bash
|
242 |
+
accelerate launch my_script.py --args_to_my_script
|
243 |
+
```
|
244 |
+
|
245 |
+
For instance, here is how you would run the GLUE example on the MRPC task (from the root of the repo):
|
246 |
+
|
247 |
+
```bash
|
248 |
+
accelerate launch examples/nlp_example.py
|
249 |
+
```
|
250 |
+
|
251 |
+
This CLI tool is **optional**, and you can still use `python my_script.py` or `python -m torchrun my_script.py` at your convenience.
|
252 |
+
|
253 |
+
You can also directly pass in the arguments you would to `torchrun` as arguments to `accelerate launch` if you wish to not run` accelerate config`.
|
254 |
+
|
255 |
+
For example, here is how to launch on two GPUs:
|
256 |
+
|
257 |
+
```bash
|
258 |
+
accelerate launch --multi_gpu --num_processes 2 examples/nlp_example.py
|
259 |
+
```
|
260 |
+
|
261 |
+
To learn more, check the CLI documentation available [here](https://huggingface.co/docs/accelerate/package_reference/cli).
|
262 |
+
|
263 |
+
## Launching multi-CPU run using MPI
|
264 |
+
|
265 |
+
🤗 Here is another way to launch multi-CPU run using MPI. You can learn how to install Open MPI on [this page](https://www.open-mpi.org/faq/?category=building#easy-build). You can use Intel MPI or MVAPICH as well.
|
266 |
+
Once you have MPI setup on your cluster, just run:
|
267 |
+
```bash
|
268 |
+
accelerate config
|
269 |
+
```
|
270 |
+
Answer the questions that are asked, selecting to run using multi-CPU, and answer "yes" when asked if you want accelerate to launch mpirun.
|
271 |
+
Then, use `accelerate launch` with your script like:
|
272 |
+
```bash
|
273 |
+
accelerate launch examples/nlp_example.py
|
274 |
+
```
|
275 |
+
Alternatively, you can use mpirun directly, without using the CLI like:
|
276 |
+
```bash
|
277 |
+
mpirun -np 2 python examples/nlp_example.py
|
278 |
+
```
|
279 |
+
|
280 |
+
## Launching training using DeepSpeed
|
281 |
+
|
282 |
+
🤗 Accelerate supports training on single/multiple GPUs using DeepSpeed. To use it, you don't need to change anything in your training code; you can set everything using just `accelerate config`. However, if you desire to tweak your DeepSpeed related args from your Python script, we provide you the `DeepSpeedPlugin`.
|
283 |
+
|
284 |
+
```python
|
285 |
+
from accelerate import Accelerator, DeepSpeedPlugin
|
286 |
+
|
287 |
+
# deepspeed needs to know your gradient accumulation steps beforehand, so don't forget to pass it
|
288 |
+
# Remember you still need to do gradient accumulation by yourself, just like you would have done without deepspeed
|
289 |
+
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=2)
|
290 |
+
accelerator = Accelerator(mixed_precision='fp16', deepspeed_plugin=deepspeed_plugin)
|
291 |
+
|
292 |
+
# How to save your 🤗 Transformer?
|
293 |
+
accelerator.wait_for_everyone()
|
294 |
+
unwrapped_model = accelerator.unwrap_model(model)
|
295 |
+
unwrapped_model.save_pretrained(save_dir, save_function=accelerator.save, state_dict=accelerator.get_state_dict(model))
|
296 |
+
```
|
297 |
+
|
298 |
+
Note: DeepSpeed support is experimental for now. In case you get into some problem, please open an issue.
|
299 |
+
|
300 |
+
## Launching your training from a notebook
|
301 |
+
|
302 |
+
🤗 Accelerate also provides a `notebook_launcher` function you can use in a notebook to launch a distributed training. This is especially useful for Colab or Kaggle notebooks with a TPU backend. Just define your training loop in a `training_function` then in your last cell, add:
|
303 |
+
|
304 |
+
```python
|
305 |
+
from accelerate import notebook_launcher
|
306 |
+
|
307 |
+
notebook_launcher(training_function)
|
308 |
+
```
|
309 |
+
|
310 |
+
An example can be found in [this notebook](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb). [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_nlp_example.ipynb)
|
311 |
+
|
312 |
+
## Why should I use 🤗 Accelerate?
|
313 |
+
|
314 |
+
You should use 🤗 Accelerate when you want to easily run your training scripts in a distributed environment without having to renounce full control over your training loop. This is not a high-level framework above PyTorch, just a thin wrapper so you don't have to learn a new library. In fact, the whole API of 🤗 Accelerate is in one class, the `Accelerator` object.
|
315 |
+
|
316 |
+
## Why shouldn't I use 🤗 Accelerate?
|
317 |
+
|
318 |
+
You shouldn't use 🤗 Accelerate if you don't want to write a training loop yourself. There are plenty of high-level libraries above PyTorch that will offer you that, 🤗 Accelerate is not one of them.
|
319 |
+
|
320 |
+
## Frameworks using 🤗 Accelerate
|
321 |
+
|
322 |
+
If you like the simplicity of 🤗 Accelerate but would prefer a higher-level abstraction around its capabilities, some frameworks and libraries that are built on top of 🤗 Accelerate are listed below:
|
323 |
+
|
324 |
+
* [Amphion](https://github.com/open-mmlab/Amphion) is a toolkit for Audio, Music, and Speech Generation. Its purpose is to support reproducible research and help junior researchers and engineers get started in the field of audio, music, and speech generation research and development.
|
325 |
+
* [Animus](https://github.com/Scitator/animus) is a minimalistic framework to run machine learning experiments. Animus highlights common "breakpoints" in ML experiments and provides a unified interface for them within [IExperiment](https://github.com/Scitator/animus/blob/main/animus/core.py#L76).
|
326 |
+
* [Catalyst](https://github.com/catalyst-team/catalyst#getting-started) is a PyTorch framework for Deep Learning Research and Development. It focuses on reproducibility, rapid experimentation, and codebase reuse so you can create something new rather than write yet another train loop. Catalyst provides a [Runner](https://catalyst-team.github.io/catalyst/api/core.html#runner) to connect all parts of the experiment: hardware backend, data transformations, model training, and inference logic.
|
327 |
+
* [fastai](https://github.com/fastai/fastai#installing) is a PyTorch framework for Deep Learning that simplifies training fast and accurate neural nets using modern best practices. fastai provides a [Learner](https://docs.fast.ai/learner.html#Learner) to handle the training, fine-tuning, and inference of deep learning algorithms.
|
328 |
+
* [Finetuner](https://github.com/jina-ai/finetuner) is a service that enables models to create higher-quality embeddings for semantic search, visual similarity search, cross-modal text<->image search, recommendation systems, clustering, duplication detection, anomaly detection, or other uses.
|
329 |
+
* [InvokeAI](https://github.com/invoke-ai/InvokeAI) is a creative engine for Stable Diffusion models, offering industry-leading WebUI, terminal usage support, and serves as the foundation for many commercial products.
|
330 |
+
* [Kornia](https://kornia.readthedocs.io/en/latest/get-started/introduction.html) is a differentiable library that allows classical computer vision to be integrated into deep learning models. Kornia provides a [Trainer](https://kornia.readthedocs.io/en/latest/x.html#kornia.x.Trainer) with the specific purpose to train and fine-tune the supported deep learning algorithms within the library.
|
331 |
+
* [Open Assistant](https://projects.laion.ai/Open-Assistant/) is a chat-based assistant that understands tasks, can interact with their party systems, and retrieve information dynamically to do so.
|
332 |
+
* [pytorch-accelerated](https://github.com/Chris-hughes10/pytorch-accelerated) is a lightweight training library, with a streamlined feature set centered around a general-purpose [Trainer](https://pytorch-accelerated.readthedocs.io/en/latest/trainer.html), that places a huge emphasis on simplicity and transparency; enabling users to understand exactly what is going on under the hood, but without having to write and maintain the boilerplate themselves!
|
333 |
+
* [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) is an open-source browser-based easy-to-use interface based on the Gradio library for Stable Diffusion.
|
334 |
+
* [torchkeras](https://github.com/lyhue1991/torchkeras) is a simple tool for training pytorch model just in a keras style, a dynamic and beautiful plot is provided in notebook to monitor your loss or metric.
|
335 |
+
* [transformers](https://github.com/huggingface/transformers) as a tool for helping train state-of-the-art machine learning models in PyTorch, Tensorflow, and JAX. (Accelerate is the backend for the PyTorch side).
|
336 |
+
|
337 |
+
|
338 |
+
## Installation
|
339 |
+
|
340 |
+
This repository is tested on Python 3.8+ and PyTorch 1.10.0+
|
341 |
+
|
342 |
+
You should install 🤗 Accelerate in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
343 |
+
|
344 |
+
First, create a virtual environment with the version of Python you're going to use and activate it.
|
345 |
+
|
346 |
+
Then, you will need to install PyTorch: refer to the [official installation page](https://pytorch.org/get-started/locally/#start-locally) regarding the specific install command for your platform. Then 🤗 Accelerate can be installed using pip as follows:
|
347 |
+
|
348 |
+
```bash
|
349 |
+
pip install accelerate
|
350 |
+
```
|
351 |
+
|
352 |
+
## Supported integrations
|
353 |
+
|
354 |
+
- CPU only
|
355 |
+
- multi-CPU on one node (machine)
|
356 |
+
- multi-CPU on several nodes (machines)
|
357 |
+
- single GPU
|
358 |
+
- multi-GPU on one node (machine)
|
359 |
+
- multi-GPU on several nodes (machines)
|
360 |
+
- TPU
|
361 |
+
- FP16/BFloat16 mixed precision
|
362 |
+
- FP8 mixed precision with [Transformer Engine](https://github.com/NVIDIA/TransformerEngine)
|
363 |
+
- DeepSpeed support (Experimental)
|
364 |
+
- PyTorch Fully Sharded Data Parallel (FSDP) support (Experimental)
|
365 |
+
- Megatron-LM support (Experimental)
|
366 |
+
|
367 |
+
## Citing 🤗 Accelerate
|
368 |
+
|
369 |
+
If you use 🤗 Accelerate in your publication, please cite it by using the following BibTeX entry.
|
370 |
+
|
371 |
+
```bibtex
|
372 |
+
@Misc{accelerate,
|
373 |
+
title = {Accelerate: Training and inference at scale made simple, efficient and adaptable.},
|
374 |
+
author = {Sylvain Gugger and Lysandre Debut and Thomas Wolf and Philipp Schmid and Zachary Mueller and Sourab Mangrulkar and Marc Sun and Benjamin Bossan},
|
375 |
+
howpublished = {\url{https://github.com/huggingface/accelerate}},
|
376 |
+
year = {2022}
|
377 |
+
}
|
378 |
+
```
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/RECORD
ADDED
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
../../Scripts/accelerate-config.exe,sha256=Vc33m1EHdtjr3Go97mmzD40pHC0udwU7e2YlKu-NM6Q,108393
|
2 |
+
../../Scripts/accelerate-estimate-memory.exe,sha256=X-r55bc3vnkw-IVoXywYuIOAVJgqbUiCHJcbDW2hPXA,108395
|
3 |
+
../../Scripts/accelerate-launch.exe,sha256=t4JMeZ4RvYQdn-ldK88qgDsWQx1l12rMbomdlG5IFHU,108393
|
4 |
+
../../Scripts/accelerate.exe,sha256=pReYe3Amm_rR1mdNNM1ghovR9bfE0U1BvtFmpT8cEHM,108401
|
5 |
+
accelerate-0.29.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
|
6 |
+
accelerate-0.29.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
|
7 |
+
accelerate-0.29.3.dist-info/METADATA,sha256=DNiQffLlP8RQMZZvHtnR0loDVK60yC3FhB8UQKkthgo,18942
|
8 |
+
accelerate-0.29.3.dist-info/RECORD,,
|
9 |
+
accelerate-0.29.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
10 |
+
accelerate-0.29.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
|
11 |
+
accelerate-0.29.3.dist-info/entry_points.txt,sha256=Z_KV59tIt4oZtUDEQ0w8JThJ6_1dd8vR8heH24DeAXI,238
|
12 |
+
accelerate-0.29.3.dist-info/top_level.txt,sha256=esVfdxTidsjQ90zsN_rPpjLFJ4ijRlx4mnLrG09hlt4,11
|
13 |
+
accelerate/__init__.py,sha256=UUqSsQQDFMm6aAZGCgNyrbTFPtwkguZA2KnoPb0XbWo,1456
|
14 |
+
accelerate/__pycache__/__init__.cpython-311.pyc,,
|
15 |
+
accelerate/__pycache__/accelerator.cpython-311.pyc,,
|
16 |
+
accelerate/__pycache__/big_modeling.cpython-311.pyc,,
|
17 |
+
accelerate/__pycache__/checkpointing.cpython-311.pyc,,
|
18 |
+
accelerate/__pycache__/data_loader.cpython-311.pyc,,
|
19 |
+
accelerate/__pycache__/hooks.cpython-311.pyc,,
|
20 |
+
accelerate/__pycache__/inference.cpython-311.pyc,,
|
21 |
+
accelerate/__pycache__/launchers.cpython-311.pyc,,
|
22 |
+
accelerate/__pycache__/local_sgd.cpython-311.pyc,,
|
23 |
+
accelerate/__pycache__/logging.cpython-311.pyc,,
|
24 |
+
accelerate/__pycache__/memory_utils.cpython-311.pyc,,
|
25 |
+
accelerate/__pycache__/optimizer.cpython-311.pyc,,
|
26 |
+
accelerate/__pycache__/scheduler.cpython-311.pyc,,
|
27 |
+
accelerate/__pycache__/state.cpython-311.pyc,,
|
28 |
+
accelerate/__pycache__/tracking.cpython-311.pyc,,
|
29 |
+
accelerate/accelerator.py,sha256=rh4-KBMCkCGLldjKo1CRtBIbsXG76fJqYWdgOugaw7w,143024
|
30 |
+
accelerate/big_modeling.py,sha256=pmtLTKTf8mJK1E2o51E3H5TBAuw_zLX_7pWtogtbP1w,29278
|
31 |
+
accelerate/checkpointing.py,sha256=vFyLNg9-8qsPBYhAkcm-WwKEeK5Lrq9qLrQWNGFKoPk,11378
|
32 |
+
accelerate/commands/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
|
33 |
+
accelerate/commands/__pycache__/__init__.cpython-311.pyc,,
|
34 |
+
accelerate/commands/__pycache__/accelerate_cli.cpython-311.pyc,,
|
35 |
+
accelerate/commands/__pycache__/env.cpython-311.pyc,,
|
36 |
+
accelerate/commands/__pycache__/estimate.cpython-311.pyc,,
|
37 |
+
accelerate/commands/__pycache__/launch.cpython-311.pyc,,
|
38 |
+
accelerate/commands/__pycache__/test.cpython-311.pyc,,
|
39 |
+
accelerate/commands/__pycache__/tpu.cpython-311.pyc,,
|
40 |
+
accelerate/commands/__pycache__/utils.cpython-311.pyc,,
|
41 |
+
accelerate/commands/accelerate_cli.py,sha256=i3nge5Wj8i4zkV0CVIk9P8veleRZbTZY0AU4fJOrKF8,1749
|
42 |
+
accelerate/commands/config/__init__.py,sha256=iJK8dgj3pc5Vdr1E7UuGoFu-BlybyXLxYDoTg9gXngE,1645
|
43 |
+
accelerate/commands/config/__pycache__/__init__.cpython-311.pyc,,
|
44 |
+
accelerate/commands/config/__pycache__/cluster.cpython-311.pyc,,
|
45 |
+
accelerate/commands/config/__pycache__/config.cpython-311.pyc,,
|
46 |
+
accelerate/commands/config/__pycache__/config_args.cpython-311.pyc,,
|
47 |
+
accelerate/commands/config/__pycache__/config_utils.cpython-311.pyc,,
|
48 |
+
accelerate/commands/config/__pycache__/default.cpython-311.pyc,,
|
49 |
+
accelerate/commands/config/__pycache__/sagemaker.cpython-311.pyc,,
|
50 |
+
accelerate/commands/config/__pycache__/update.cpython-311.pyc,,
|
51 |
+
accelerate/commands/config/cluster.py,sha256=lA55beGeo0fAowfffKhf8nGcy6lBjaOxTtV-Yg_Rz6s,29926
|
52 |
+
accelerate/commands/config/config.py,sha256=FuRlQvOjgATEtyqOSsGD-KEtOCvACOHjs2C-krrtldk,3035
|
53 |
+
accelerate/commands/config/config_args.py,sha256=hE42coVnn0UU-ysqp2ZH-jlqaXoPaHt5E_3qxT42GIM,10024
|
54 |
+
accelerate/commands/config/config_utils.py,sha256=DcjIV1mDInFmct2_XQ-9KYAkREINs6YuHRbZe5HFjT8,2926
|
55 |
+
accelerate/commands/config/default.py,sha256=3-SdEhl_zXM9S3f-FxkSVtiBQ5VY-QNsC4O26u60bss,5350
|
56 |
+
accelerate/commands/config/sagemaker.py,sha256=GjHE2-h4tRr1P_PFtMF3miiAtJlzkbHbMb6kFXqn8eo,10341
|
57 |
+
accelerate/commands/config/update.py,sha256=NXW1J7GkUHpg71QlIXsmMB_0z8S8IZo2FWax5POwrhc,2395
|
58 |
+
accelerate/commands/env.py,sha256=HXXUozMFlxs0b-bU2a3nEcXwYz-5EBkfCvE9svqeN2U,3595
|
59 |
+
accelerate/commands/estimate.py,sha256=shEn2nXyHmz94zpAzV2R8__lcNYW9f9djl7bOHoo04k,12398
|
60 |
+
accelerate/commands/launch.py,sha256=rYmkdc0Kbcux4TOqBG_sJN-NNc4nmV90vuwHqhGNfWw,41439
|
61 |
+
accelerate/commands/menu/__init__.py,sha256=uqSlBM0TFHBwzdv3p3SXfpAk1lZFp4h1a7mbBdscPHs,645
|
62 |
+
accelerate/commands/menu/__pycache__/__init__.cpython-311.pyc,,
|
63 |
+
accelerate/commands/menu/__pycache__/cursor.cpython-311.pyc,,
|
64 |
+
accelerate/commands/menu/__pycache__/helpers.cpython-311.pyc,,
|
65 |
+
accelerate/commands/menu/__pycache__/input.cpython-311.pyc,,
|
66 |
+
accelerate/commands/menu/__pycache__/keymap.cpython-311.pyc,,
|
67 |
+
accelerate/commands/menu/__pycache__/selection_menu.cpython-311.pyc,,
|
68 |
+
accelerate/commands/menu/cursor.py,sha256=-lmpJVAzvNc0c3EOtSuLoKB59zqylVCbYyWLPnrOmvQ,2028
|
69 |
+
accelerate/commands/menu/helpers.py,sha256=KrSB5fJjH4MUEUAQJ6bYaN16AYcnl9UalDrPD3DYeeg,1483
|
70 |
+
accelerate/commands/menu/input.py,sha256=Uj9eDp8-Mb0Fe49nuogqo9W_RCfYd6udfjiPKx7Wjmg,2537
|
71 |
+
accelerate/commands/menu/keymap.py,sha256=eXj-suyYs1m5dEHoUKN4mKAMLc8DWHnwhP6G6JSU0jQ,4086
|
72 |
+
accelerate/commands/menu/selection_menu.py,sha256=bxy-DHaKKC6SCToOlMBv5_z0MdUzylEg6Sio9OuV3GM,4921
|
73 |
+
accelerate/commands/test.py,sha256=YrPYEaAACOGZ6btn2MV6NbMSEdBUcMWADLbQWaZSHtk,2149
|
74 |
+
accelerate/commands/tpu.py,sha256=KyxDP7IuveidZrbW4rx2s8Ku3o_ptI6tzwr_R7ck0os,5548
|
75 |
+
accelerate/commands/utils.py,sha256=ilcfE32oHh28EToM00nc_SR6upfZiuxUI0AjjZu8KYY,3995
|
76 |
+
accelerate/data_loader.py,sha256=qQojnHAW0cjTL7jLQN_g-oHlRZBkKzti3ifk84Izuw4,48307
|
77 |
+
accelerate/hooks.py,sha256=x0FBwwoy6PKSwulavYTpc4gERIoB7RHGPF0Qe6qjXNA,31244
|
78 |
+
accelerate/inference.py,sha256=Ci7kkw2cocNpuvmbo1ytW2QgcI_HKWoXkIdonFOr0tg,7977
|
79 |
+
accelerate/launchers.py,sha256=iFDZ7seDdRwHAHy1BbVPmPccAONiPdV2aBOHNuT2ZD8,11375
|
80 |
+
accelerate/local_sgd.py,sha256=v0-AxldUSCYCI-rqjLiEHsVtSqyEIWTC5ppn7CW7qfY,4002
|
81 |
+
accelerate/logging.py,sha256=kvUvk33r_7T2BNzIwqRZBOhuC-50Ju4rm4HbsM6h2G8,4897
|
82 |
+
accelerate/memory_utils.py,sha256=3R5LoeHl6GgTZ-IMPrDZMdaEehWarGdPqODushb-6pg,862
|
83 |
+
accelerate/optimizer.py,sha256=H7e1XwEysZ_GFR8V_3bHjFAY7zzrzO8samCyW_r7dZo,7453
|
84 |
+
accelerate/scheduler.py,sha256=des_4M_Tt1W8gCYZZbLla0GHBEgJY3Wx2EGBQPTzeiY,4238
|
85 |
+
accelerate/state.py,sha256=yOpKq0xf-yY7qPeQMKWqG05PiU_uUsIkyGqyAlOIJNQ,50409
|
86 |
+
accelerate/test_utils/__init__.py,sha256=amEDYw-ztgIvHkYT3mv3ixk1QJirUnf6jfPJzqUUYkQ,1459
|
87 |
+
accelerate/test_utils/__pycache__/__init__.cpython-311.pyc,,
|
88 |
+
accelerate/test_utils/__pycache__/examples.cpython-311.pyc,,
|
89 |
+
accelerate/test_utils/__pycache__/testing.cpython-311.pyc,,
|
90 |
+
accelerate/test_utils/__pycache__/training.cpython-311.pyc,,
|
91 |
+
accelerate/test_utils/examples.py,sha256=jRm1S9TkmeoLaqprBvtVFN4LesiaDZtKMNIoLNY2euw,7281
|
92 |
+
accelerate/test_utils/scripts/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
|
93 |
+
accelerate/test_utils/scripts/__pycache__/__init__.cpython-311.pyc,,
|
94 |
+
accelerate/test_utils/scripts/__pycache__/test_cli.cpython-311.pyc,,
|
95 |
+
accelerate/test_utils/scripts/__pycache__/test_distributed_data_loop.cpython-311.pyc,,
|
96 |
+
accelerate/test_utils/scripts/__pycache__/test_notebook.cpython-311.pyc,,
|
97 |
+
accelerate/test_utils/scripts/__pycache__/test_ops.cpython-311.pyc,,
|
98 |
+
accelerate/test_utils/scripts/__pycache__/test_script.cpython-311.pyc,,
|
99 |
+
accelerate/test_utils/scripts/__pycache__/test_sync.cpython-311.pyc,,
|
100 |
+
accelerate/test_utils/scripts/external_deps/__init__.py,sha256=m1PPTDT4ziIAvM0-FDSgIMIZ69Konn126s6LwuzH6v8,606
|
101 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/__init__.cpython-311.pyc,,
|
102 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_checkpointing.cpython-311.pyc,,
|
103 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_metrics.cpython-311.pyc,,
|
104 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_peak_memory_usage.cpython-311.pyc,,
|
105 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_performance.cpython-311.pyc,,
|
106 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_pippy.cpython-311.pyc,,
|
107 |
+
accelerate/test_utils/scripts/external_deps/__pycache__/test_zero3_integration.cpython-311.pyc,,
|
108 |
+
accelerate/test_utils/scripts/external_deps/test_checkpointing.py,sha256=zILzHevzqxB1NPPDrJ1furaitI8MTvhBeG9QzzL0bmE,10668
|
109 |
+
accelerate/test_utils/scripts/external_deps/test_metrics.py,sha256=67-S1qeCpCL9ceaH22RsIsBJscMS7VQWaO4Krcszzbw,12133
|
110 |
+
accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py,sha256=D0YnKCxkI4ZwDOmZ5Ev6hL9jPyP7SU4WffpVFiK14bs,11072
|
111 |
+
accelerate/test_utils/scripts/external_deps/test_performance.py,sha256=8fV3wCM1H9HVRRyC5C4EGWt-9aHILX_y3-E7LfSiv7M,9803
|
112 |
+
accelerate/test_utils/scripts/external_deps/test_pippy.py,sha256=RdMoD1rlLKMyjyl0soSqR3iDbGidS6-z5GHo3bJUOw8,4647
|
113 |
+
accelerate/test_utils/scripts/external_deps/test_zero3_integration.py,sha256=bJ0Jio-6OCyS2FIgFmZi3duqG1gbkOoTEcHsrORYIL4,1503
|
114 |
+
accelerate/test_utils/scripts/test_cli.py,sha256=qfk1aYFtdvYFCYPkl05602SNGvk08QTv0xZVVcFVtzM,833
|
115 |
+
accelerate/test_utils/scripts/test_distributed_data_loop.py,sha256=VqFPKNRu8yx2MoZ4nHy5wRocEthSymcIA2mg1knqDq8,8315
|
116 |
+
accelerate/test_utils/scripts/test_notebook.py,sha256=Q4OOWHa_GMmzwfiq71BTpKYmhCHLC02J42OO94ut9xk,1629
|
117 |
+
accelerate/test_utils/scripts/test_ops.py,sha256=BcGn3xJT2wUJ0Yk_6VLNkneSv9z24JeAoQjsgdIIRr4,6170
|
118 |
+
accelerate/test_utils/scripts/test_script.py,sha256=QyHRWvHQm1XWkAH7YilQ0gZe3zwvEkyqD6JXmneWqak,32059
|
119 |
+
accelerate/test_utils/scripts/test_sync.py,sha256=3kltq-GuUjOVuo6_FOuWiPyc5f3pGiqiwEAbex5x_-o,18263
|
120 |
+
accelerate/test_utils/testing.py,sha256=HIp7n6qPMh8KPbwEzNWu5mzfxnQRcU15EQ1AQKehpo0,20571
|
121 |
+
accelerate/test_utils/training.py,sha256=8k_YAQ21MzUdb2aFWq1t2fihW1b-iBGh1OJSL3whY68,4019
|
122 |
+
accelerate/tracking.py,sha256=WLY-H1DTsxrz4BVzle7QZMp0Irg84yFMbA1e6JaY3pM,39789
|
123 |
+
accelerate/utils/__init__.py,sha256=SEP34Od2TbTZt7AbhPJoWWDxFoNMeNEyAuVfaPgVu7k,6065
|
124 |
+
accelerate/utils/__pycache__/__init__.cpython-311.pyc,,
|
125 |
+
accelerate/utils/__pycache__/bnb.cpython-311.pyc,,
|
126 |
+
accelerate/utils/__pycache__/constants.cpython-311.pyc,,
|
127 |
+
accelerate/utils/__pycache__/dataclasses.cpython-311.pyc,,
|
128 |
+
accelerate/utils/__pycache__/deepspeed.cpython-311.pyc,,
|
129 |
+
accelerate/utils/__pycache__/environment.cpython-311.pyc,,
|
130 |
+
accelerate/utils/__pycache__/fsdp_utils.cpython-311.pyc,,
|
131 |
+
accelerate/utils/__pycache__/imports.cpython-311.pyc,,
|
132 |
+
accelerate/utils/__pycache__/launch.cpython-311.pyc,,
|
133 |
+
accelerate/utils/__pycache__/megatron_lm.cpython-311.pyc,,
|
134 |
+
accelerate/utils/__pycache__/memory.cpython-311.pyc,,
|
135 |
+
accelerate/utils/__pycache__/modeling.cpython-311.pyc,,
|
136 |
+
accelerate/utils/__pycache__/offload.cpython-311.pyc,,
|
137 |
+
accelerate/utils/__pycache__/operations.cpython-311.pyc,,
|
138 |
+
accelerate/utils/__pycache__/other.cpython-311.pyc,,
|
139 |
+
accelerate/utils/__pycache__/random.cpython-311.pyc,,
|
140 |
+
accelerate/utils/__pycache__/rich.cpython-311.pyc,,
|
141 |
+
accelerate/utils/__pycache__/torch_xla.cpython-311.pyc,,
|
142 |
+
accelerate/utils/__pycache__/tqdm.cpython-311.pyc,,
|
143 |
+
accelerate/utils/__pycache__/transformer_engine.cpython-311.pyc,,
|
144 |
+
accelerate/utils/__pycache__/versions.cpython-311.pyc,,
|
145 |
+
accelerate/utils/bnb.py,sha256=3i59dy8EcBYJEnT2alJ5_M-zeIpFsrceQ4bImiJJKOk,20570
|
146 |
+
accelerate/utils/constants.py,sha256=e6Bpf7gSZLFkvfr-1B1841b6lVoKJ5uyyf5kefe0aT4,2566
|
147 |
+
accelerate/utils/dataclasses.py,sha256=QSP-gYjXz68s0PAseKwLHRBQUnzcBQwPk80otV4X20k,74253
|
148 |
+
accelerate/utils/deepspeed.py,sha256=1JFnz-dY6xP9yHywnX8bzZNq-d-8Cpg5CvVNLZ74b_0,10276
|
149 |
+
accelerate/utils/environment.py,sha256=8eVGMCu7xT1y0Hxochnxz_RghDePtWo2TghDlOm5Gf0,10409
|
150 |
+
accelerate/utils/fsdp_utils.py,sha256=QURWBtK8D00zppqJko0yeznEovXvnkRLI0NpPPkog1Q,10667
|
151 |
+
accelerate/utils/imports.py,sha256=gYj_W3E5V83dYlSqqYE89OAK6JonzwhlcEjsJcOpB3E,12232
|
152 |
+
accelerate/utils/launch.py,sha256=hHpcnR0NrSmqaT7AIaeIeXOAJVIhWnWdq3kA1XSnOYs,27459
|
153 |
+
accelerate/utils/megatron_lm.py,sha256=IfHrtMiPSwuzh5ri96rTTIcEluuMNuIj3O8Y4jW6Fzk,57124
|
154 |
+
accelerate/utils/memory.py,sha256=VxJCU-tMX8uE34GbJnxtDXYPHh4D9p2Y-d6rkGxqSa0,5200
|
155 |
+
accelerate/utils/modeling.py,sha256=OfTHPg7oM9-jzYotLZjJKj6TrhCTFV3qOtQAOhKXmzQ,80246
|
156 |
+
accelerate/utils/offload.py,sha256=qjaVai81wbkA0YH2WkmOXvZT0BRphygfRV_4Ua4j4U4,7837
|
157 |
+
accelerate/utils/operations.py,sha256=zsmRx8mP2eoImPc42pOmBIqaHX7RDugw8AZ_HF3onpg,30610
|
158 |
+
accelerate/utils/other.py,sha256=kgON65EhzQN3oQZqzgAOmmNC2vsQkeO77qEuzN7Zv7c,12283
|
159 |
+
accelerate/utils/random.py,sha256=t-HsLQRm8etSiLSyONCU9wNhj-0VjDUyDme9p6RxDNU,4881
|
160 |
+
accelerate/utils/rich.py,sha256=8JZX_uGMQX-BufdXxJpdne7BWd1KyLHSgbiGxrDMYr8,847
|
161 |
+
accelerate/utils/torch_xla.py,sha256=Pq1tuqN0X_pWDVza6YgjfO45uoJdoRVRForLeLQzFus,1908
|
162 |
+
accelerate/utils/tqdm.py,sha256=9Ovx4GL8AvjSaBd_OysoUGPW9ZJ3ZBOde6776HMEMOA,1344
|
163 |
+
accelerate/utils/transformer_engine.py,sha256=gNPkOv_D1SDLm6nVZtxWIjyA6snxWtAQeBWUZLIErJE,3582
|
164 |
+
accelerate/utils/versions.py,sha256=UgmcbjBm--6CIx1ZamSAMjAK_B_2l48LbeaNygqej8M,2149
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/REQUESTED
ADDED
File without changes
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.37.1)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/entry_points.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[console_scripts]
|
2 |
+
accelerate = accelerate.commands.accelerate_cli:main
|
3 |
+
accelerate-config = accelerate.commands.config:main
|
4 |
+
accelerate-estimate-memory = accelerate.commands.estimate:main
|
5 |
+
accelerate-launch = accelerate.commands.launch:main
|
llm/Lib/site-packages/accelerate-0.29.3.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
accelerate
|
llm/Lib/site-packages/accelerate/__init__.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
__version__ = "0.29.3"
|
15 |
+
|
16 |
+
from .accelerator import Accelerator
|
17 |
+
from .big_modeling import (
|
18 |
+
cpu_offload,
|
19 |
+
cpu_offload_with_hook,
|
20 |
+
disk_offload,
|
21 |
+
dispatch_model,
|
22 |
+
init_empty_weights,
|
23 |
+
init_on_device,
|
24 |
+
load_checkpoint_and_dispatch,
|
25 |
+
)
|
26 |
+
from .data_loader import skip_first_batches
|
27 |
+
from .inference import prepare_pippy
|
28 |
+
from .launchers import debug_launcher, notebook_launcher
|
29 |
+
from .state import PartialState
|
30 |
+
from .utils import (
|
31 |
+
AutocastKwargs,
|
32 |
+
DataLoaderConfiguration,
|
33 |
+
DeepSpeedPlugin,
|
34 |
+
DistributedDataParallelKwargs,
|
35 |
+
DistributedType,
|
36 |
+
FullyShardedDataParallelPlugin,
|
37 |
+
GradScalerKwargs,
|
38 |
+
InitProcessGroupKwargs,
|
39 |
+
find_executable_batch_size,
|
40 |
+
infer_auto_device_map,
|
41 |
+
is_rich_available,
|
42 |
+
load_checkpoint_in_model,
|
43 |
+
synchronize_rng_states,
|
44 |
+
)
|
45 |
+
|
46 |
+
|
47 |
+
if is_rich_available():
|
48 |
+
from .utils import rich
|
llm/Lib/site-packages/accelerate/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (1.53 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/accelerator.cpython-311.pyc
ADDED
Binary file (158 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/big_modeling.cpython-311.pyc
ADDED
Binary file (33 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/checkpointing.cpython-311.pyc
ADDED
Binary file (14.3 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/data_loader.cpython-311.pyc
ADDED
Binary file (50.9 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/hooks.cpython-311.pyc
ADDED
Binary file (33.4 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/inference.cpython-311.pyc
ADDED
Binary file (8.86 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/launchers.cpython-311.pyc
ADDED
Binary file (11.7 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/local_sgd.cpython-311.pyc
ADDED
Binary file (5.05 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/logging.cpython-311.pyc
ADDED
Binary file (5.86 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/memory_utils.cpython-311.pyc
ADDED
Binary file (472 Bytes). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/optimizer.cpython-311.pyc
ADDED
Binary file (11.1 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/scheduler.cpython-311.pyc
ADDED
Binary file (4.66 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/state.cpython-311.pyc
ADDED
Binary file (59.3 kB). View file
|
|
llm/Lib/site-packages/accelerate/__pycache__/tracking.cpython-311.pyc
ADDED
Binary file (53.4 kB). View file
|
|
llm/Lib/site-packages/accelerate/accelerator.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llm/Lib/site-packages/accelerate/big_modeling.py
ADDED
@@ -0,0 +1,627 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import logging
|
16 |
+
import os
|
17 |
+
from contextlib import contextmanager
|
18 |
+
from functools import wraps
|
19 |
+
from typing import Dict, List, Optional, Union
|
20 |
+
|
21 |
+
import torch
|
22 |
+
import torch.nn as nn
|
23 |
+
|
24 |
+
from .hooks import (
|
25 |
+
AlignDevicesHook,
|
26 |
+
CpuOffload,
|
27 |
+
UserCpuOffloadHook,
|
28 |
+
add_hook_to_module,
|
29 |
+
attach_align_device_hook,
|
30 |
+
attach_align_device_hook_on_blocks,
|
31 |
+
)
|
32 |
+
from .utils import (
|
33 |
+
OffloadedWeightsLoader,
|
34 |
+
check_cuda_p2p_ib_support,
|
35 |
+
check_device_map,
|
36 |
+
extract_submodules_state_dict,
|
37 |
+
find_tied_parameters,
|
38 |
+
get_balanced_memory,
|
39 |
+
infer_auto_device_map,
|
40 |
+
is_mlu_available,
|
41 |
+
is_npu_available,
|
42 |
+
is_torch_version,
|
43 |
+
is_xpu_available,
|
44 |
+
load_checkpoint_in_model,
|
45 |
+
offload_state_dict,
|
46 |
+
parse_flag_from_env,
|
47 |
+
retie_parameters,
|
48 |
+
)
|
49 |
+
from .utils.other import recursive_getattr
|
50 |
+
|
51 |
+
|
52 |
+
logger = logging.getLogger(__name__)
|
53 |
+
|
54 |
+
|
55 |
+
@contextmanager
|
56 |
+
def init_empty_weights(include_buffers: bool = None):
|
57 |
+
"""
|
58 |
+
A context manager under which models are initialized with all parameters on the meta device, therefore creating an
|
59 |
+
empty model. Useful when just initializing the model would blow the available RAM.
|
60 |
+
|
61 |
+
Args:
|
62 |
+
include_buffers (`bool`, *optional*):
|
63 |
+
Whether or not to also put all buffers on the meta device while initializing.
|
64 |
+
|
65 |
+
Example:
|
66 |
+
|
67 |
+
```python
|
68 |
+
import torch.nn as nn
|
69 |
+
from accelerate import init_empty_weights
|
70 |
+
|
71 |
+
# Initialize a model with 100 billions parameters in no time and without using any RAM.
|
72 |
+
with init_empty_weights():
|
73 |
+
tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
|
74 |
+
```
|
75 |
+
|
76 |
+
<Tip warning={true}>
|
77 |
+
|
78 |
+
Any model created under this context manager has no weights. As such you can't do something like
|
79 |
+
`model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
|
80 |
+
Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
|
81 |
+
called.
|
82 |
+
|
83 |
+
</Tip>
|
84 |
+
"""
|
85 |
+
if include_buffers is None:
|
86 |
+
include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
|
87 |
+
with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
|
88 |
+
yield f
|
89 |
+
|
90 |
+
|
91 |
+
@contextmanager
|
92 |
+
def init_on_device(device: torch.device, include_buffers: bool = None):
|
93 |
+
"""
|
94 |
+
A context manager under which models are initialized with all parameters on the specified device.
|
95 |
+
|
96 |
+
Args:
|
97 |
+
device (`torch.device`):
|
98 |
+
Device to initialize all parameters on.
|
99 |
+
include_buffers (`bool`, *optional*):
|
100 |
+
Whether or not to also put all buffers on the meta device while initializing.
|
101 |
+
|
102 |
+
Example:
|
103 |
+
|
104 |
+
```python
|
105 |
+
import torch.nn as nn
|
106 |
+
from accelerate import init_on_device
|
107 |
+
|
108 |
+
with init_on_device(device=torch.device("cuda")):
|
109 |
+
tst = nn.Liner(100, 100) # on `cuda` device
|
110 |
+
```
|
111 |
+
"""
|
112 |
+
if include_buffers is None:
|
113 |
+
include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
|
114 |
+
|
115 |
+
# TODO(shingjan): remove the torch version check once older versions are deprecated
|
116 |
+
if is_torch_version(">=", "2.0") and include_buffers:
|
117 |
+
with device:
|
118 |
+
yield
|
119 |
+
return
|
120 |
+
|
121 |
+
old_register_parameter = nn.Module.register_parameter
|
122 |
+
if include_buffers:
|
123 |
+
old_register_buffer = nn.Module.register_buffer
|
124 |
+
|
125 |
+
def register_empty_parameter(module, name, param):
|
126 |
+
old_register_parameter(module, name, param)
|
127 |
+
if param is not None:
|
128 |
+
param_cls = type(module._parameters[name])
|
129 |
+
kwargs = module._parameters[name].__dict__
|
130 |
+
kwargs["requires_grad"] = param.requires_grad
|
131 |
+
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
|
132 |
+
|
133 |
+
def register_empty_buffer(module, name, buffer, persistent=True):
|
134 |
+
old_register_buffer(module, name, buffer, persistent=persistent)
|
135 |
+
if buffer is not None:
|
136 |
+
module._buffers[name] = module._buffers[name].to(device)
|
137 |
+
|
138 |
+
# Patch tensor creation
|
139 |
+
if include_buffers:
|
140 |
+
tensor_constructors_to_patch = {
|
141 |
+
torch_function_name: getattr(torch, torch_function_name)
|
142 |
+
for torch_function_name in ["empty", "zeros", "ones", "full"]
|
143 |
+
}
|
144 |
+
else:
|
145 |
+
tensor_constructors_to_patch = {}
|
146 |
+
|
147 |
+
def patch_tensor_constructor(fn):
|
148 |
+
def wrapper(*args, **kwargs):
|
149 |
+
kwargs["device"] = device
|
150 |
+
return fn(*args, **kwargs)
|
151 |
+
|
152 |
+
return wrapper
|
153 |
+
|
154 |
+
try:
|
155 |
+
nn.Module.register_parameter = register_empty_parameter
|
156 |
+
if include_buffers:
|
157 |
+
nn.Module.register_buffer = register_empty_buffer
|
158 |
+
for torch_function_name in tensor_constructors_to_patch.keys():
|
159 |
+
setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
|
160 |
+
yield
|
161 |
+
finally:
|
162 |
+
nn.Module.register_parameter = old_register_parameter
|
163 |
+
if include_buffers:
|
164 |
+
nn.Module.register_buffer = old_register_buffer
|
165 |
+
for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
|
166 |
+
setattr(torch, torch_function_name, old_torch_function)
|
167 |
+
|
168 |
+
|
169 |
+
def cpu_offload(
|
170 |
+
model: nn.Module,
|
171 |
+
execution_device: Optional[torch.device] = None,
|
172 |
+
offload_buffers: bool = False,
|
173 |
+
state_dict: Optional[Dict[str, torch.Tensor]] = None,
|
174 |
+
preload_module_classes: Optional[List[str]] = None,
|
175 |
+
):
|
176 |
+
"""
|
177 |
+
Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
|
178 |
+
copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
|
179 |
+
state dict and put on the execution device passed as they are needed, then offloaded again.
|
180 |
+
|
181 |
+
Args:
|
182 |
+
model (`torch.nn.Module`):
|
183 |
+
The model to offload.
|
184 |
+
execution_device (`torch.device`, *optional*):
|
185 |
+
The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
|
186 |
+
model first parameter device.
|
187 |
+
offload_buffers (`bool`, *optional*, defaults to `False`):
|
188 |
+
Whether or not to offload the buffers with the model parameters.
|
189 |
+
state_dict (`Dict[str, torch.Tensor]`, *optional*):
|
190 |
+
The state dict of the model that will be kept on CPU.
|
191 |
+
preload_module_classes (`List[str]`, *optional*):
|
192 |
+
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
|
193 |
+
of the forward. This should only be used for classes that have submodules which are registered but not
|
194 |
+
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
195 |
+
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
196 |
+
"""
|
197 |
+
if execution_device is None:
|
198 |
+
execution_device = next(iter(model.parameters())).device
|
199 |
+
if state_dict is None:
|
200 |
+
state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}
|
201 |
+
|
202 |
+
add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
|
203 |
+
attach_align_device_hook(
|
204 |
+
model,
|
205 |
+
execution_device=execution_device,
|
206 |
+
offload=True,
|
207 |
+
offload_buffers=offload_buffers,
|
208 |
+
weights_map=state_dict,
|
209 |
+
preload_module_classes=preload_module_classes,
|
210 |
+
)
|
211 |
+
|
212 |
+
return model
|
213 |
+
|
214 |
+
|
215 |
+
def cpu_offload_with_hook(
|
216 |
+
model: torch.nn.Module,
|
217 |
+
execution_device: Optional[Union[int, str, torch.device]] = None,
|
218 |
+
prev_module_hook: Optional[UserCpuOffloadHook] = None,
|
219 |
+
):
|
220 |
+
"""
|
221 |
+
Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
|
222 |
+
[`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
|
223 |
+
the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.
|
224 |
+
|
225 |
+
Args:
|
226 |
+
model (`torch.nn.Module`):
|
227 |
+
The model to offload.
|
228 |
+
execution_device(`str`, `int` or `torch.device`, *optional*):
|
229 |
+
The device on which the model should be executed. Will default to the MPS device if it's available, then
|
230 |
+
GPU 0 if there is a GPU, and finally to the CPU.
|
231 |
+
prev_module_hook (`UserCpuOffloadHook`, *optional*):
|
232 |
+
The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
|
233 |
+
offload method will be called just before the forward of the model to which this hook is attached.
|
234 |
+
|
235 |
+
Example:
|
236 |
+
|
237 |
+
```py
|
238 |
+
model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
|
239 |
+
model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
|
240 |
+
model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)
|
241 |
+
|
242 |
+
hid_1 = model_1(input)
|
243 |
+
for i in range(50):
|
244 |
+
# model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop.
|
245 |
+
hid_2 = model_2(hid_1)
|
246 |
+
# model2 is offloaded to the CPU just before this forward.
|
247 |
+
hid_3 = model_3(hid_3)
|
248 |
+
|
249 |
+
# For model3, you need to manually call the hook offload method.
|
250 |
+
hook_3.offload()
|
251 |
+
```
|
252 |
+
"""
|
253 |
+
hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
|
254 |
+
add_hook_to_module(model, hook, append=True)
|
255 |
+
user_hook = UserCpuOffloadHook(model, hook)
|
256 |
+
return model, user_hook
|
257 |
+
|
258 |
+
|
259 |
+
def disk_offload(
|
260 |
+
model: nn.Module,
|
261 |
+
offload_dir: Union[str, os.PathLike],
|
262 |
+
execution_device: Optional[torch.device] = None,
|
263 |
+
offload_buffers: bool = False,
|
264 |
+
preload_module_classes: Optional[List[str]] = None,
|
265 |
+
):
|
266 |
+
"""
|
267 |
+
Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
|
268 |
+
memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and
|
269 |
+
put on the execution device passed as they are needed, then offloaded again.
|
270 |
+
|
271 |
+
Args:
|
272 |
+
model (`torch.nn.Module`): The model to offload.
|
273 |
+
offload_dir (`str` or `os.PathLike`):
|
274 |
+
The folder in which to offload the model weights (or where the model weights are already offloaded).
|
275 |
+
execution_device (`torch.device`, *optional*):
|
276 |
+
The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
|
277 |
+
model's first parameter device.
|
278 |
+
offload_buffers (`bool`, *optional*, defaults to `False`):
|
279 |
+
Whether or not to offload the buffers with the model parameters.
|
280 |
+
preload_module_classes (`List[str]`, *optional*):
|
281 |
+
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
|
282 |
+
of the forward. This should only be used for classes that have submodules which are registered but not
|
283 |
+
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
284 |
+
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
285 |
+
"""
|
286 |
+
if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
|
287 |
+
offload_state_dict(offload_dir, model.state_dict())
|
288 |
+
if execution_device is None:
|
289 |
+
execution_device = next(iter(model.parameters())).device
|
290 |
+
weights_map = OffloadedWeightsLoader(save_folder=offload_dir)
|
291 |
+
|
292 |
+
add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
|
293 |
+
attach_align_device_hook(
|
294 |
+
model,
|
295 |
+
execution_device=execution_device,
|
296 |
+
offload=True,
|
297 |
+
offload_buffers=offload_buffers,
|
298 |
+
weights_map=weights_map,
|
299 |
+
preload_module_classes=preload_module_classes,
|
300 |
+
)
|
301 |
+
|
302 |
+
return model
|
303 |
+
|
304 |
+
|
305 |
+
def dispatch_model(
|
306 |
+
model: nn.Module,
|
307 |
+
device_map: Dict[str, Union[str, int, torch.device]],
|
308 |
+
main_device: Optional[torch.device] = None,
|
309 |
+
state_dict: Optional[Dict[str, torch.Tensor]] = None,
|
310 |
+
offload_dir: Optional[Union[str, os.PathLike]] = None,
|
311 |
+
offload_index: Optional[Dict[str, str]] = None,
|
312 |
+
offload_buffers: bool = False,
|
313 |
+
skip_keys: Optional[Union[str, List[str]]] = None,
|
314 |
+
preload_module_classes: Optional[List[str]] = None,
|
315 |
+
force_hooks: bool = False,
|
316 |
+
):
|
317 |
+
"""
|
318 |
+
Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
|
319 |
+
the CPU or even the disk.
|
320 |
+
|
321 |
+
Args:
|
322 |
+
model (`torch.nn.Module`):
|
323 |
+
The model to dispatch.
|
324 |
+
device_map (`Dict[str, Union[str, int, torch.device]]`):
|
325 |
+
A dictionary mapping module names in the models `state_dict` to the device they should go to. Note that
|
326 |
+
`"disk"` is accepted even if it's not a proper value for `torch.device`.
|
327 |
+
main_device (`str`, `int` or `torch.device`, *optional*):
|
328 |
+
The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
|
329 |
+
`"disk"`.
|
330 |
+
state_dict (`Dict[str, torch.Tensor]`, *optional*):
|
331 |
+
The state dict of the part of the model that will be kept on CPU.
|
332 |
+
offload_dir (`str` or `os.PathLike`):
|
333 |
+
The folder in which to offload the model weights (or where the model weights are already offloaded).
|
334 |
+
offload_index (`Dict`, *optional*):
|
335 |
+
A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will default
|
336 |
+
to the index saved in `save_folder`.
|
337 |
+
offload_buffers (`bool`, *optional*, defaults to `False`):
|
338 |
+
Whether or not to offload the buffers with the model parameters.
|
339 |
+
skip_keys (`str` or `List[str]`, *optional*):
|
340 |
+
A list of keys to ignore when moving inputs or outputs between devices.
|
341 |
+
preload_module_classes (`List[str]`, *optional*):
|
342 |
+
A list of classes whose instances should load all their weights (even in the submodules) at the beginning
|
343 |
+
of the forward. This should only be used for classes that have submodules which are registered but not
|
344 |
+
called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
|
345 |
+
`dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
|
346 |
+
force_hooks (`bool`, *optional*, defaults to `False`):
|
347 |
+
Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
|
348 |
+
single device.
|
349 |
+
"""
|
350 |
+
# Error early if the device map is incomplete.
|
351 |
+
check_device_map(model, device_map)
|
352 |
+
|
353 |
+
# for backward compatibility
|
354 |
+
is_bnb_quantized = (
|
355 |
+
getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
|
356 |
+
) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"
|
357 |
+
|
358 |
+
# We attach hooks if the device_map has at least 2 different devices or if
|
359 |
+
# force_hooks is set to `True`. Otherwise, the model in already loaded
|
360 |
+
# in the unique device and the user can decide where to dispatch the model.
|
361 |
+
# If the model is quantized, we always force-dispatch the model
|
362 |
+
if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
|
363 |
+
if main_device is None:
|
364 |
+
if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
|
365 |
+
main_device = "cpu"
|
366 |
+
else:
|
367 |
+
main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]
|
368 |
+
|
369 |
+
if main_device != "cpu":
|
370 |
+
cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
|
371 |
+
if state_dict is None and len(cpu_modules) > 0:
|
372 |
+
state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)
|
373 |
+
|
374 |
+
disk_modules = [name for name, device in device_map.items() if device == "disk"]
|
375 |
+
if offload_dir is None and offload_index is None and len(disk_modules) > 0:
|
376 |
+
raise ValueError(
|
377 |
+
"We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
|
378 |
+
f"need to be offloaded: {', '.join(disk_modules)}."
|
379 |
+
)
|
380 |
+
if (
|
381 |
+
len(disk_modules) > 0
|
382 |
+
and offload_index is None
|
383 |
+
and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
|
384 |
+
):
|
385 |
+
disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
|
386 |
+
offload_state_dict(offload_dir, disk_state_dict)
|
387 |
+
|
388 |
+
execution_device = {
|
389 |
+
name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
|
390 |
+
}
|
391 |
+
execution_device[""] = main_device
|
392 |
+
offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
|
393 |
+
offload = {name: device in offloaded_devices for name, device in device_map.items()}
|
394 |
+
save_folder = offload_dir if len(disk_modules) > 0 else None
|
395 |
+
if state_dict is not None or save_folder is not None or offload_index is not None:
|
396 |
+
device = main_device if offload_index is not None else None
|
397 |
+
weights_map = OffloadedWeightsLoader(
|
398 |
+
state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
|
399 |
+
)
|
400 |
+
else:
|
401 |
+
weights_map = None
|
402 |
+
|
403 |
+
# When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
|
404 |
+
# tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
|
405 |
+
# original pointer) on each devices.
|
406 |
+
tied_params = find_tied_parameters(model)
|
407 |
+
|
408 |
+
tied_params_map = {}
|
409 |
+
for group in tied_params:
|
410 |
+
for param_name in group:
|
411 |
+
# data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
|
412 |
+
# to care about views of tensors through storage_offset.
|
413 |
+
data_ptr = recursive_getattr(model, param_name).data_ptr()
|
414 |
+
tied_params_map[data_ptr] = {}
|
415 |
+
|
416 |
+
# Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
|
417 |
+
# as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
|
418 |
+
|
419 |
+
attach_align_device_hook_on_blocks(
|
420 |
+
model,
|
421 |
+
execution_device=execution_device,
|
422 |
+
offload=offload,
|
423 |
+
offload_buffers=offload_buffers,
|
424 |
+
weights_map=weights_map,
|
425 |
+
skip_keys=skip_keys,
|
426 |
+
preload_module_classes=preload_module_classes,
|
427 |
+
tied_params_map=tied_params_map,
|
428 |
+
)
|
429 |
+
|
430 |
+
# warn if there is any params on the meta device
|
431 |
+
offloaded_devices_str = " and ".join(
|
432 |
+
[device for device in set(device_map.values()) if device in ("cpu", "disk")]
|
433 |
+
)
|
434 |
+
if len(offloaded_devices_str) > 0:
|
435 |
+
logging.warning(
|
436 |
+
f"Some parameters are on the meta device device because they were offloaded to the {offloaded_devices_str}."
|
437 |
+
)
|
438 |
+
|
439 |
+
# Attaching the hook may break tied weights, so we retie them
|
440 |
+
retie_parameters(model, tied_params)
|
441 |
+
|
442 |
+
# add warning to cuda and to method
|
443 |
+
def add_warning(fn, model):
|
444 |
+
@wraps(fn)
|
445 |
+
def wrapper(*args, **kwargs):
|
446 |
+
warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
|
447 |
+
if str(fn.__name__) == "to":
|
448 |
+
to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
|
449 |
+
if to_device is not None:
|
450 |
+
logger.warning(warning_msg)
|
451 |
+
else:
|
452 |
+
logger.warning(warning_msg)
|
453 |
+
for param in model.parameters():
|
454 |
+
if param.device == torch.device("meta"):
|
455 |
+
raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
|
456 |
+
return fn(*args, **kwargs)
|
457 |
+
|
458 |
+
return wrapper
|
459 |
+
|
460 |
+
model.to = add_warning(model.to, model)
|
461 |
+
if is_npu_available():
|
462 |
+
model.npu = add_warning(model.npu, model)
|
463 |
+
elif is_mlu_available():
|
464 |
+
model.mlu = add_warning(model.mlu, model)
|
465 |
+
elif is_xpu_available():
|
466 |
+
model.xpu = add_warning(model.xpu, model)
|
467 |
+
else:
|
468 |
+
model.cuda = add_warning(model.cuda, model)
|
469 |
+
|
470 |
+
# Check if we are using multi-gpus with RTX 4000 series
|
471 |
+
use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
|
472 |
+
if use_multi_gpu and not check_cuda_p2p_ib_support():
|
473 |
+
logger.warning(
|
474 |
+
"We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
|
475 |
+
"This can affect the multi-gpu inference when using accelerate device_map."
|
476 |
+
"Please make sure to update your driver to the latest version which resolves this."
|
477 |
+
)
|
478 |
+
else:
|
479 |
+
device = list(device_map.values())[0]
|
480 |
+
# `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
|
481 |
+
if is_npu_available() and isinstance(device, int):
|
482 |
+
device = f"npu:{device}"
|
483 |
+
elif is_mlu_available() and isinstance(device, int):
|
484 |
+
device = f"mlu:{device}"
|
485 |
+
elif is_xpu_available() and isinstance(device, int):
|
486 |
+
device = f"xpu:{device}"
|
487 |
+
if device != "disk":
|
488 |
+
model.to(device)
|
489 |
+
else:
|
490 |
+
raise ValueError(
|
491 |
+
"You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
|
492 |
+
)
|
493 |
+
# Convert OrderedDict back to dict for easier usage
|
494 |
+
model.hf_device_map = dict(device_map)
|
495 |
+
return model
|
496 |
+
|
497 |
+
|
498 |
+
def load_checkpoint_and_dispatch(
    model: nn.Module,
    checkpoint: Union[str, os.PathLike],
    device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_buffers: bool = False,
    dtype: Optional[Union[str, torch.dtype]] = None,
    offload_state_dict: Optional[bool] = None,
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
    force_hooks: bool = False,
    strict: bool = False,
):
    """
    Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
    loaded, then attaches the hooks needed for the model to run even when split across several devices.

    Args:
        model (`torch.nn.Module`): The model in which we want to load a checkpoint.
        checkpoint (`str` or `os.PathLike`):
            The checkpoint to load. It can be:
            - a path to a file containing a whole model state dict
            - a path to a `.json` file containing the index to a sharded checkpoint
            - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
        device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
            A map that specifies where each submodule should go. It doesn't need to be refined to each
            parameter/buffer name: once a given module name is inside, every submodule of it will be sent to the
            same device. Pass `"auto"` to have Accelerate compute the most optimized `device_map` automatically
            (see [here](../concept_guides/big_model_inference#designing-a-device-map) for the other strategies).
            Defaults to `None`, in which case [`dispatch_model`] is not called.
        max_memory (`Dict`, *optional*):
            A dictionary mapping device identifiers to their maximum memory. Defaults to the maximum memory
            available for each GPU and the available CPU RAM if unset.
        no_split_module_classes (`List[str]`, *optional*):
            A list of layer class names that should never be split across devices (for instance any layer that has
            a residual connection).
        offload_folder (`str` or `os.PathLike`, *optional*):
            If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
            well as the parameters.
        dtype (`str` or `torch.dtype`, *optional*):
            If provided, the weights will be converted to that type when loaded.
        offload_state_dict (`bool`, *optional*):
            If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU
            RAM if the weight of the CPU state dict + the biggest shard does not fit. Defaults to `True` when the
            picked device map contains `"disk"` values.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the
            beginning of the forward. This should only be used for classes that have submodules which are
            registered but not called directly during the forward, for instance if a `dense` linear layer is
            registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of
            calling `dense` directly.
        force_hooks (`bool`, *optional*, defaults to `False`):
            Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
            single device.
        strict (`bool`, *optional*, defaults to `False`):
            Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
            state_dict.

    Example:

    ```python
    >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
    >>> from huggingface_hub import hf_hub_download
    >>> from transformers import AutoConfig, AutoModelForCausalLM

    >>> # Download the Weights
    >>> checkpoint = "EleutherAI/gpt-j-6B"
    >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")

    >>> # Create a model and initialize it with empty weights
    >>> config = AutoConfig.from_pretrained(checkpoint)
    >>> with init_empty_weights():
    ...     model = AutoModelForCausalLM.from_config(config)

    >>> # Load the checkpoint and dispatch it to the right devices
    >>> model = load_checkpoint_and_dispatch(
    ...     model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
    ... )
    ```
    """
    # A string device_map selects one of the automatic placement strategies; anything else is
    # treated as an explicit module-name -> device mapping.
    if isinstance(device_map, str):
        if device_map not in ("auto", "balanced", "balanced_low_0", "sequential"):
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        # "sequential" fills devices in order; the other strategies first compute a balanced
        # memory budget per device before inferring the map.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                max_memory=max_memory,
                no_split_module_classes=no_split_module_classes,
                dtype=dtype,
                low_zero=(device_map == "balanced_low_0"),
            )
        device_map = infer_auto_device_map(
            model,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
            dtype=dtype,
            offload_buffers=offload_buffers,
        )
    # When some weights end up on disk, default to offloading the CPU state dict as well.
    needs_disk_offload = device_map is not None and "disk" in device_map.values()
    if offload_state_dict is None and needs_disk_offload:
        offload_state_dict = True
    load_checkpoint_in_model(
        model,
        checkpoint,
        device_map=device_map,
        offload_folder=offload_folder,
        dtype=dtype,
        offload_state_dict=offload_state_dict,
        offload_buffers=offload_buffers,
        strict=strict,
    )
    # Without a device map there is nothing to dispatch: the checkpoint was simply loaded.
    if device_map is None:
        return model
    return dispatch_model(
        model,
        device_map=device_map,
        offload_dir=offload_folder,
        offload_buffers=offload_buffers,
        skip_keys=skip_keys,
        preload_module_classes=preload_module_classes,
        force_hooks=force_hooks,
    )
|
llm/Lib/site-packages/accelerate/checkpointing.py
ADDED
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import random
|
16 |
+
from pathlib import Path
|
17 |
+
from typing import List
|
18 |
+
|
19 |
+
import numpy as np
|
20 |
+
import torch
|
21 |
+
from safetensors.torch import load_file
|
22 |
+
from torch.cuda.amp import GradScaler
|
23 |
+
|
24 |
+
from .utils import (
|
25 |
+
MODEL_NAME,
|
26 |
+
OPTIMIZER_NAME,
|
27 |
+
RNG_STATE_NAME,
|
28 |
+
SAFE_MODEL_NAME,
|
29 |
+
SAFE_WEIGHTS_NAME,
|
30 |
+
SAMPLER_NAME,
|
31 |
+
SCALER_NAME,
|
32 |
+
SCHEDULER_NAME,
|
33 |
+
WEIGHTS_NAME,
|
34 |
+
get_pretty_name,
|
35 |
+
is_torch_xla_available,
|
36 |
+
is_xpu_available,
|
37 |
+
save,
|
38 |
+
)
|
39 |
+
|
40 |
+
|
41 |
+
if is_torch_xla_available():
|
42 |
+
import torch_xla.core.xla_model as xm
|
43 |
+
|
44 |
+
from .logging import get_logger
|
45 |
+
from .state import PartialState
|
46 |
+
|
47 |
+
|
48 |
+
logger = get_logger(__name__)
|
49 |
+
|
50 |
+
|
51 |
+
def save_accelerator_state(
    output_dir: str,
    model_states: List[dict],
    optimizers: list,
    schedulers: list,
    dataloaders: list,
    process_index: int,
    scaler: GradScaler = None,
    save_on_each_node: bool = False,
    safe_serialization: bool = True,
):
    """
    Saves the current states of the models, optimizers, scaler, and RNG generators to a given directory.

    <Tip>

    If `safe_serialization` is `True`, models will be saved with `safetensors` while the rest are saved using native
    `pickle`.

    </Tip>

    Args:
        output_dir (`str` or `os.PathLike`):
            The name of the folder to save all relevant weights and states.
        model_states (`List[torch.nn.Module]`):
            A list of model states
        optimizers (`List[torch.optim.Optimizer]`):
            A list of optimizer instances
        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
            A list of learning rate schedulers
        dataloaders (`List[torch.utils.data.DataLoader]`):
            A list of dataloader instances to save their sampler states
        process_index (`int`):
            The current process index in the Accelerator state
        scaler (`torch.cuda.amp.GradScaler`, *optional*):
            An optional gradient scaler instance to save
        save_on_each_node (`bool`, *optional*):
            Whether to save on every node, or only the main node.
        safe_serialization (`bool`, *optional*, defaults to `True`):
            Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).

    Returns:
        `Path`: the `output_dir` the states were written to.
    """
    output_dir = Path(output_dir)
    # Model states: the first model keeps the canonical file name, subsequent ones get a
    # `_{i}` suffix inserted before the extension.
    for i, state in enumerate(model_states):
        weights_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
        if i > 0:
            weights_name = weights_name.replace(".", f"_{i}.")
        output_model_file = output_dir.joinpath(weights_name)
        save(state, output_model_file, save_on_each_node=save_on_each_node, safe_serialization=safe_serialization)
        logger.info(f"Model weights saved in {output_model_file}")
    # Optimizer states — always pickled (`.bin`), regardless of `safe_serialization`.
    for i, opt in enumerate(optimizers):
        state = opt.state_dict()
        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
        output_optimizer_file = output_dir.joinpath(optimizer_name)
        save(state, output_optimizer_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        logger.info(f"Optimizer state saved in {output_optimizer_file}")
    # Scheduler states — same `.bin` naming scheme as the optimizers.
    for i, scheduler in enumerate(schedulers):
        state = scheduler.state_dict()
        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
        output_scheduler_file = output_dir.joinpath(scheduler_name)
        save(state, output_scheduler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        logger.info(f"Scheduler state saved in {output_scheduler_file}")
    # DataLoader states
    for i, dataloader in enumerate(dataloaders):
        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
        output_sampler_file = output_dir.joinpath(sampler_name)
        # Only save if we have our custom sampler
        # NOTE(review): deferred import — presumably avoids a circular import with
        # `.data_loader`; confirm before hoisting to module level.
        from .data_loader import IterableDatasetShard, SeedableRandomSampler

        if isinstance(dataloader.dataset, IterableDatasetShard):
            sampler = dataloader.sampler.sampler

            if isinstance(sampler, SeedableRandomSampler):
                # The whole sampler object is pickled, not just a state dict.
                save(sampler, output_sampler_file, save_on_each_node=save_on_each_node, safe_serialization=False)
        logger.info(f"Sampler state for dataloader {i} saved in {output_sampler_file}")

    # GradScaler state
    if scaler is not None:
        state = scaler.state_dict()
        output_scaler_file = output_dir.joinpath(SCALER_NAME)
        torch.save(state, output_scaler_file)
        logger.info(f"Gradient scaler state saved in {output_scaler_file}")
    # Random number generator states — saved per process so each rank can restore its own
    # stream on resume.
    states = {}
    states_name = f"{RNG_STATE_NAME}_{process_index}.pkl"
    states["random_state"] = random.getstate()
    states["numpy_random_seed"] = np.random.get_state()
    states["torch_manual_seed"] = torch.get_rng_state()
    if is_xpu_available():
        states["torch_xpu_manual_seed"] = torch.xpu.get_rng_state_all()
    else:
        states["torch_cuda_manual_seed"] = torch.cuda.get_rng_state_all()
    if is_torch_xla_available():
        states["xm_seed"] = xm.get_rng_state()
    output_states_file = output_dir.joinpath(states_name)
    torch.save(states, output_states_file)
    logger.info(f"Random states saved in {output_states_file}")
    return output_dir
|
151 |
+
|
152 |
+
|
153 |
+
def load_accelerator_state(
    input_dir,
    models,
    optimizers,
    schedulers,
    dataloaders,
    process_index,
    scaler=None,
    map_location=None,
    **load_model_func_kwargs,
):
    """
    Loads states of the models, optimizers, scaler, and RNG generators from a given directory.

    Args:
        input_dir (`str` or `os.PathLike`):
            The name of the folder to load all relevant weights and states.
        models (`List[torch.nn.Module]`):
            A list of model instances
        optimizers (`List[torch.optim.Optimizer]`):
            A list of optimizer instances
        schedulers (`List[torch.optim.lr_scheduler._LRScheduler]`):
            A list of learning rate schedulers
        dataloaders (`List[torch.utils.data.DataLoader]`):
            A list of dataloader instances whose sampler states should be restored
        process_index (`int`):
            The current process index in the Accelerator state
        scaler (`torch.cuda.amp.GradScaler`, *optional*):
            An optional *GradScaler* instance to load
        map_location (`str`, *optional*):
            What device to load the optimizer state onto. Should be one of either "cpu" or "on_device".
        load_model_func_kwargs (`dict`, *optional*):
            Additional arguments that can be passed to the model's `load_state_dict` method.

    Raises:
        TypeError: if `map_location` is not one of `None`, `"cpu"`, or `"on_device"`.
    """
    if map_location not in [None, "cpu", "on_device"]:
        raise TypeError(
            "Unsupported optimizer map location passed, please choose one of `None`, `'cpu'`, or `'on_device'`"
        )
    # Resolve the symbolic map_location into something `torch.load`/`load_file` accept.
    if map_location is None:
        map_location = "cpu"
    elif map_location == "on_device":
        map_location = PartialState().device

    input_dir = Path(input_dir)
    # Model states: prefer the safetensors file when present, otherwise fall back to the
    # pickled `.bin` produced by older saves / `safe_serialization=False`.
    for i, model in enumerate(models):
        ending = f"_{i}" if i > 0 else ""
        input_model_file = input_dir.joinpath(f"{SAFE_MODEL_NAME}{ending}.safetensors")
        if input_model_file.exists():
            state_dict = load_file(input_model_file, device=str(map_location))
        else:
            # Load with torch
            input_model_file = input_dir.joinpath(f"{MODEL_NAME}{ending}.bin")
            state_dict = torch.load(input_model_file, map_location=map_location)
        models[i].load_state_dict(state_dict, **load_model_func_kwargs)
    logger.info("All model weights loaded successfully")

    # Optimizer states
    for i, opt in enumerate(optimizers):
        optimizer_name = f"{OPTIMIZER_NAME}.bin" if i == 0 else f"{OPTIMIZER_NAME}_{i}.bin"
        input_optimizer_file = input_dir.joinpath(optimizer_name)
        optimizer_state = torch.load(input_optimizer_file, map_location=map_location)
        optimizers[i].load_state_dict(optimizer_state)
    logger.info("All optimizer states loaded successfully")

    # Scheduler states
    for i, scheduler in enumerate(schedulers):
        scheduler_name = f"{SCHEDULER_NAME}.bin" if i == 0 else f"{SCHEDULER_NAME}_{i}.bin"
        input_scheduler_file = input_dir.joinpath(scheduler_name)
        scheduler.load_state_dict(torch.load(input_scheduler_file))
    logger.info("All scheduler states loaded successfully")

    for i, dataloader in enumerate(dataloaders):
        sampler_name = f"{SAMPLER_NAME}.bin" if i == 0 else f"{SAMPLER_NAME}_{i}.bin"
        input_sampler_file = input_dir.joinpath(sampler_name)
        # Only load if we have our custom sampler
        # NOTE(review): deferred import — presumably avoids a circular import with
        # `.data_loader`; confirm before hoisting to module level.
        from .data_loader import IterableDatasetShard, SeedableRandomSampler

        if isinstance(dataloader.dataset, IterableDatasetShard):
            sampler = dataloader.sampler.sampler

            if isinstance(sampler, SeedableRandomSampler):
                # The saved object is the whole sampler, so it replaces the current one.
                dataloader.sampler.sampler = torch.load(input_sampler_file)
    logger.info("All dataloader sampler states loaded successfully")

    # GradScaler state
    if scaler is not None:
        input_scaler_file = input_dir.joinpath(SCALER_NAME)
        scaler.load_state_dict(torch.load(input_scaler_file))
        logger.info("GradScaler state loaded successfully")

    # Random states — best effort: a missing or incompatible RNG file (e.g. resuming on
    # different hardware) is logged and skipped rather than aborting the load.
    try:
        states = torch.load(input_dir.joinpath(f"{RNG_STATE_NAME}_{process_index}.pkl"))
        random.setstate(states["random_state"])
        np.random.set_state(states["numpy_random_seed"])
        torch.set_rng_state(states["torch_manual_seed"])
        if is_xpu_available():
            torch.xpu.set_rng_state_all(states["torch_xpu_manual_seed"])
        else:
            torch.cuda.set_rng_state_all(states["torch_cuda_manual_seed"])
        if is_torch_xla_available():
            xm.set_rng_state(states["xm_seed"])
        logger.info("All random states loaded successfully")
    except Exception:
        logger.info("Could not load random states")
|
257 |
+
|
258 |
+
|
259 |
+
def save_custom_state(obj, path, index: int = 0, save_on_each_node: bool = False):
    """
    Saves the state of `obj` to `{path}/custom_checkpoint_{index}.pkl`
    """
    # Each registered custom object gets its own numbered pickle inside the checkpoint folder.
    target = Path(path) / f"custom_checkpoint_{index}.pkl"
    logger.info(f"Saving the state of {get_pretty_name(obj)} to {target}")
    save(obj.state_dict(), target, save_on_each_node=save_on_each_node)
|
267 |
+
|
268 |
+
|
269 |
+
def load_custom_state(obj, path, index: int = 0):
    """
    Loads the state of `obj` at `{path}/custom_checkpoint_{index}.pkl`
    """
    source = f"{path}/custom_checkpoint_{index}.pkl"
    logger.info(f"Loading the state of {get_pretty_name(obj)} from {source}")
    # The pickled state dict is restored onto CPU before being handed to the object.
    obj.load_state_dict(torch.load(source, map_location="cpu"))
|
llm/Lib/site-packages/accelerate/commands/__init__.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
llm/Lib/site-packages/accelerate/commands/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (170 Bytes). View file
|
|
llm/Lib/site-packages/accelerate/commands/__pycache__/accelerate_cli.cpython-311.pyc
ADDED
Binary file (1.91 kB). View file
|
|
llm/Lib/site-packages/accelerate/commands/__pycache__/env.cpython-311.pyc
ADDED
Binary file (5.03 kB). View file
|
|
llm/Lib/site-packages/accelerate/commands/__pycache__/estimate.cpython-311.pyc
ADDED
Binary file (16.5 kB). View file
|
|
llm/Lib/site-packages/accelerate/commands/__pycache__/launch.cpython-311.pyc
ADDED
Binary file (47.8 kB). View file
|
|
llm/Lib/site-packages/accelerate/commands/__pycache__/test.cpython-311.pyc
ADDED
Binary file (2.35 kB). View file
|
|