DrishtiSharma committed
Add new SentenceTransformer model.
Files changed:
- .gitattributes +1 -0
- 1_Pooling/config.json +10 -0
- 2_Dense/config.json +1 -0
- 2_Dense/model.safetensors +3 -0
- README.md +714 -0
- added_tokens.json +5 -0
- config.json +34 -0
- config_sentence_transformers.json +13 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +345 -0
- modules.json +20 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +20 -0
- tokenizer.json +3 -0
- tokenizer_config.json +50 -0
- vocab.json +0 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 1536,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
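Per this config, `pooling_mode_mean_tokens: true` means sentence vectors are built by averaging the 1536-dimensional token embeddings over non-padding positions. A minimal sketch of that computation (illustrative only, not the Pooling module's actual internals):

```python
import torch

def mean_pool(token_embeddings: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    """Average token embeddings over non-padding positions.

    token_embeddings: (batch, seq_len, 1536); attention_mask: (batch, seq_len).
    """
    mask = attention_mask.unsqueeze(-1).float()      # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(dim=1)    # (batch, 1536)
    counts = mask.sum(dim=1).clamp(min=1e-9)         # guard against division by zero
    return summed / counts                           # (batch, 1536)
```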
2_Dense/config.json ADDED
@@ -0,0 +1 @@
{"in_features": 1536, "out_features": 1024, "bias": true, "activation_function": "torch.nn.modules.linear.Identity"}
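This Dense head is equivalent to a plain linear projection from the 1536-dimensional pooled vector down to the model's 1024-dimensional output space; with `Identity` as the activation function, no nonlinearity is applied. A sketch of the configured shape (random weights here, not the trained ones):

```python
import torch

dense = torch.nn.Linear(in_features=1536, out_features=1024, bias=True)
pooled = torch.randn(2, 1536)   # e.g. two mean-pooled sentence vectors
out = dense(pooled)             # shape: (2, 1024) final sentence embeddings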
2_Dense/model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8267c28b7b2bd2136d38abc648512d12d9caeb9b98906b5ad6a4d1ea95faa6f
size 6295712
README.md ADDED
@@ -0,0 +1,714 @@
---
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:22291
- loss:MultipleNegativesRankingLoss
base_model: dunzhang/stella_en_1.5B_v5
widget:
- source_sentence: What specific information should be included in the annual report
    to adequately explain a company's business model and strategy in alignment with
    Principle 7?
  sentences:
  - 'REGULATORY REQUIREMENTS FOR AUTHORISED PERSONS ENGAGED IN REGULATED ACTIVITIES
    IN RELATION TO VIRTUAL ASSETS


    Anti-Money Laundering and Countering Financing of Terrorism


    On 22 February 2019, FATF issued a public statement recognising the need to adequately
    mitigate the ML and TF risks associated with digital asset activities. As per
    the statement, FATF proposed more details relating to the regulation and supervision/monitoring
    of virtual assets (“VAs”) and virtual asset services providers (“VASPs”) by way
    of its (Draft) Interpretive Note to Recommendation 15, “New technologies”.

    '
  - "REGULATORY REQUIREMENTS FOR AUTHORISED PERSONS ENGAGED IN REGULATED ACTIVITIES\
    \ IN RELATION TO VIRTUAL ASSETS\nTechnology Governance and Controls\nWhen complying\
    \ with GEN Rule 3.3 and COBS Rule 17.5, Authorised Persons should have due regard\
    \ to the following key areas from a technology perspective:\n\na)\tCareful maintenance\
    \ and development of systems and architecture (e.g., code version control, implementation\
    \ of updates, issue resolution, and regular internal and third party testing);\n\
    \nb)\tSecurity measures and procedures for the safe storage and transmission of\
    \ data;\n\nc)\tBusiness continuity and Client engagement planning in the event\
    \ of both planned and unplanned system outages;\n\nd)\tProcesses and procedures\
    \ specifying management of personnel and decision-making by qualified staff; and\n\
    \ne)\tProcedures for the creation and management of services, interfaces and channels\
    \ provided by or to third parties (as recipients and providers of data or services).\n"
  - Other stakeholders. The Directors should include in the annual report an explanation
    of the basis on which the Reporting Entity generates or preserves value over the
    longer term (the business model) and the strategy for delivering the objectives
    of the Reporting Entity.
- source_sentence: Could you elaborate on the types of 'relevant events' that must
    be reported by Fund Administrators, particularly those which might undermine their
    ability to fulfill their duties as per Rule 17.1.5(d)?
  sentences:
  - "The Regulator would expect any agreement required under this Rule 17.1.5 to\
    \ include as a minimum the following provisions:\n(a)\tunambiguous descriptions\
    \ and definitions of the activities and functions to be provided by the Fund Administrator\
    \ and the duties to be performed by both parties;\n(b)\tan agreed standard in\
    \ respect of resources and services supported as necessary by performance measures\
    \ in accordance with the applicable legislation;\n(c)\tthe requirement for regular\
    \ detailed reporting to a specified frequency from the Fund Administrator in respect\
    \ of its duties and activities;\n(d)\tprovisions relating to the reporting of\
    \ relevant events such as technological changes or error reporting and, in particular,\
    \ any event which undermines the ability of the Fund Administrator to fulfil its\
    \ duties;\n(e)\tthe requirement for an annual review (at a minimum) of the performance\
    \ of the functions by the Fund Administrator; and\n(f)\tprovisions relating to\
    \ records and adequate access by the Foreign Fund Manager, the Fund's auditor\
    \ or any other Persons providing control or risk management functions for the\
    \ Fund, as required by the Foreign Fund Manager or applicable laws to that Fund."
  - "A Relevant Person which is part of a Group must ensure that it:\n(a)\thas developed\
    \ and implemented policies and procedures for the sharing of information between\
    \ Group entities, including the sharing of information relating to CDD and money\
    \ laundering risks;\n(b)\thas in place adequate safeguards on the confidentiality\
    \ and use of information exchanged between Group entities, including consideration\
    \ of relevant data protection legislation;\n(c)\tremains aware of the money laundering\
    \ risks of the Group as a whole and of its exposure to the Group and takes active\
    \ steps to mitigate such risks;\n(d)\tcontributes to a Group-wide risk assessment\
    \ to identify and assess money laundering risks for the Group; and\n(e)\tprovides\
    \ its Group-wide compliance, audit and AML/TFS functions with customer account\
    \ and Transaction information from its Branches and Subsidiaries when necessary\
    \ for AML/TFS purposes."
  - '

    There are two methods for calculating the Equity Risk Capital Requirement: the
    standard method and the simplified method. The standard method requires two separate
    calculations. The first is Specific Risk and the second is General Market Risk.
    The simplified method is easier to calculate but usually results in a higher Capital
    Requirement than the standard method. In addition, Authorised Persons must calculate
    an Interest Rate Risk Capital Requirement for a forward, a Future, an Option or
    a company issued Warrant.'
- source_sentence: Can a Third Party be compelled to provide access to material under
    Section 255 if that material is relevant to an issue that identifies the Third
    Party?
  sentences:
  - This Chapter deals with the regulatory requirements arising out of the need for
    Authorised Persons to carry out a self assessment of their risk which can be reviewed
    and assessed by the Regulator. This Chapter details the Rules stipulating the
    need to complete internal risk assessments by Authorised Persons in defined frequencies
    and the Regulator's role in reviewing the results of such assessments. In the
    case of Authorised Persons facing financial risks, the requirements in this Chapter
    mandate completion of an Internal Capital Adequacy Assessment Process. The Regulator
    will review the results of such internal risk assessments. This Chapter also sets
    out how the Regulator may impose an additional Capital Requirement on a firm specific
    basis in addition to the minimum requirement specified in Chapter 3 of these Rules
    to address higher-than-normal risk.
  - Accounting Records must be maintained by an Authorised Person and Recognised Body
    such as to enable its Governing Body to ensure that any financial statements prepared
    by the Authorised Person or Recognised Body comply with the Regulations and Rules.
  - Section 255 applies to a Third Party as it applies to the person to whom the
    notice to which this section applies was given, in so far as the material to which
    access must be given under that section relates to the matter which identifies
    the Third Party.
- source_sentence: What is the immediate action required by an Authorised Person or
    Recognised Body upon discovering that an Employee may have committed a fraud against
    a Customer?
  sentences:
  - "Fraud and errors. Each Authorised Person and Recognised Body must notify the\
    \ Regulator immediately if one of the following events arises in relation to its\
    \ activities in or from the ADGM:\n(a)\tit becomes aware that an Employee may\
    \ have committed a fraud against one of its Customers;\n(b)\ta serious fraud has\
    \ been committed against it;\n(c)\tit has reason to believe that a Person is acting\
    \ with intent to commit a serious fraud against it;\n(d)\tit identifies significant\
    \ irregularities in its accounting or other records, whether or not there is evidence\
    \ of fraud; or\n(e)\tit suspects that one of its Employees who is Connected with\
    \ the Authorised Person or Recognised Body's Regulated Activities may be guilty\
    \ of serious misconduct concerning his honesty or integrity.\n"
  - "If a Relevant Person acquires another business, either in whole or in substantial\
    \ part, the Regulator would permit the Relevant Person to rely on the CDD conducted\
    \ by the business it is acquiring, but would expect the Relevant Person to have\
    \ done the following:\n(a)\tas part of its due diligence for the acquisition,\
    \ to have taken a reasonable sample of the prospective customers to assess the\
    \ quality of the CDD undertaken; and\n(b)\tto have undertaken CDD on all the customers\
    \ to cover any deficiencies identified in (a) as soon as possible following the\
    \ acquisition, prioritising high-risk customers."
  - 'Additionally, given their heavy dependence on collecting and processing client
    data and the risks of cyberattacks to their automated and largely digital mode
    of operations, Digital Investment Managers must also put in place robust data
    security policies and systems to ensure compliance with all relevant data protection
    regulations, including the ADGM’s Data Protection Regulations and, as appropriate,
    PRU 6.6 – 6.9.

    '
- source_sentence: Are there any anticipated changes to the COBS Rule 17.3 / MIR Rule
    3.2.1 that Authorised Persons should be preparing for in the near future? If so,
    what is the expected timeline for these changes to take effect?
  sentences:
  - "A Relevant Person must ensure that its MLRO implements and has oversight of and\
    \ is responsible for the following matters:\n(a)\tthe day-to-day operations for\
    \ compliance by the Relevant Person with its AML/TFS policies, procedures, systems\
    \ and controls;\n(b)\tacting as the point of contact to receive internal notifications\
    \ of suspicious activity from the Relevant Person's Employees under Rule 14.2.2;\n\
    (c)\ttaking appropriate action under Rule 14.3.1 following receipt of a notification\
    \ from an Employee;\n(d)\tmaking, in accordance with Federal AML Legislation,\
    \ Suspicious Activity/Transaction Reports;\n(e)\tacting as the point of contact\
    \ within the Relevant Person for competent U.A.E. authorities and the Regulator\
    \ regarding money laundering issues;\n(f)\tresponding promptly to any request\
    \ for information made by competent U.A.E. authorities or the Regulator;\n(g)\t\
    receiving and acting upon any relevant findings, recommendations, guidance, directives,\
    \ resolutions, Sanctions, notices or other conclusions described in Chapter 11;\
    \ and\n(h)\testablishing and maintaining an appropriate money laundering training\
    \ programme and adequate awareness arrangements under Chapter 13."
  - 'REGULATORY REQUIREMENTS FOR AUTHORISED PERSONS ENGAGED IN REGULATED ACTIVITIES
    IN RELATION TO VIRTUAL ASSETS

    Capital Requirements

    When applying COBS Rule 17.3 / MIR Rule 3.2.1 to an Authorised Person, the FSRA
    will apply proportionality in considering whether any additional capital buffer
    must be held, based on the size, scope, complexity and nature of the activities
    and operations of the Authorised Person and, if so, the appropriate amount of
    regulatory capital required as an additional buffer. An Authorised Person that
    the FSRA considers to be high risk may attract higher regulatory capital requirements.

    '
  - "In exceptional circumstances, where the Bail-in Tool is applied, the Regulator\
    \ may exclude or partially exclude certain liabilities from the application of\
    \ the Write Down or Conversion Power where—\n(a)\tit is not possible to bail-in\
    \ that liability within a reasonable time despite the reasonable efforts of the\
    \ Regulator;\n(b)\tthe exclusion is strictly necessary and is proportionate to\
    \ achieve the continuity of Critical Functions and Core Business Lines in a manner\
    \ that maintains the ability of the Institution in Resolution to continue key\
    \ operations, services and transactions;\n(c)\tthe exclusion is strictly necessary\
    \ and proportionate to avoid giving rise to widespread contagion, in particular\
    \ as regards Deposits and Eligible Deposits which would severely disrupt the functioning\
    \ of financial markets, including financial market infrastructures, in a manner\
    \ that could cause broader financial instability; or\n(d)\tthe application of\
    \ the Bail-in Tool to those liabilities would cause a destruction of value such\
    \ that the losses borne by other creditors would be higher than if those liabilities\
    \ were excluded from bail-in."
pipeline_tag: sentence-similarity
library_name: sentence-transformers
metrics:
- cosine_accuracy@1
- cosine_accuracy@3
- cosine_accuracy@5
- cosine_accuracy@10
- cosine_precision@1
- cosine_precision@3
- cosine_precision@5
- cosine_precision@10
- cosine_recall@1
- cosine_recall@3
- cosine_recall@5
- cosine_recall@10
- cosine_ndcg@10
- cosine_mrr@10
- cosine_map@100
- dot_accuracy@1
- dot_accuracy@3
- dot_accuracy@5
- dot_accuracy@10
- dot_precision@1
- dot_precision@3
- dot_precision@5
- dot_precision@10
- dot_recall@1
- dot_recall@3
- dot_recall@5
- dot_recall@10
- dot_ndcg@10
- dot_mrr@10
- dot_map@100
model-index:
- name: SentenceTransformer based on dunzhang/stella_en_1.5B_v5
  results:
  - task:
      type: information-retrieval
      name: Information Retrieval
    dataset:
      name: Unknown
      type: unknown
    metrics:
    - type: cosine_accuracy@1
      value: 0.6233859397417504
      name: Cosine Accuracy@1
    - type: cosine_accuracy@3
      value: 0.7636298421807748
      name: Cosine Accuracy@3
    - type: cosine_accuracy@5
      value: 0.8113342898134863
      name: Cosine Accuracy@5
    - type: cosine_accuracy@10
      value: 0.8558106169296987
      name: Cosine Accuracy@10
    - type: cosine_precision@1
      value: 0.6233859397417504
      name: Cosine Precision@1
    - type: cosine_precision@3
      value: 0.2687709230033477
      name: Cosine Precision@3
    - type: cosine_precision@5
      value: 0.17568149210903872
      name: Cosine Precision@5
    - type: cosine_precision@10
      value: 0.09533715925394547
      name: Cosine Precision@10
    - type: cosine_recall@1
      value: 0.5457735533237685
      name: Cosine Recall@1
    - type: cosine_recall@3
      value: 0.6823290291726446
      name: Cosine Recall@3
    - type: cosine_recall@5
      value: 0.7313605930176948
      name: Cosine Recall@5
    - type: cosine_recall@10
      value: 0.7834947393591583
      name: Cosine Recall@10
    - type: cosine_ndcg@10
      value: 0.6892546786573623
      name: Cosine Ndcg@10
    - type: cosine_mrr@10
      value: 0.7027094577668452
      name: Cosine Mrr@10
    - type: cosine_map@100
      value: 0.6454912452493724
      name: Cosine Map@100
    - type: dot_accuracy@1
      value: 0.3446915351506456
      name: Dot Accuracy@1
    - type: dot_accuracy@3
      value: 0.5656384505021521
      name: Dot Accuracy@3
    - type: dot_accuracy@5
      value: 0.6639167862266858
      name: Dot Accuracy@5
    - type: dot_accuracy@10
      value: 0.7786944045911047
      name: Dot Accuracy@10
    - type: dot_precision@1
      value: 0.3446915351506456
      name: Dot Precision@1
    - type: dot_precision@3
      value: 0.19548063127690102
      name: Dot Precision@3
    - type: dot_precision@5
      value: 0.14031563845050213
      name: Dot Precision@5
    - type: dot_precision@10
      value: 0.0854734576757532
      name: Dot Precision@10
    - type: dot_recall@1
      value: 0.3028813964610234
      name: Dot Recall@1
    - type: dot_recall@3
      value: 0.49997010999521757
      name: Dot Recall@3
    - type: dot_recall@5
      value: 0.5915172166427547
      name: Dot Recall@5
    - type: dot_recall@10
      value: 0.7070540411286466
      name: Dot Recall@10
    - type: dot_ndcg@10
      value: 0.5127009608010437
      name: Dot Ndcg@10
    - type: dot_mrr@10
      value: 0.4801890471635807
      name: Dot Mrr@10
    - type: dot_map@100
      value: 0.4463977594142586
      name: Dot Map@100
---

# SentenceTransformer based on dunzhang/stella_en_1.5B_v5

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [dunzhang/stella_en_1.5B_v5](https://huggingface.co/dunzhang/stella_en_1.5B_v5). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [dunzhang/stella_en_1.5B_v5](https://huggingface.co/dunzhang/stella_en_1.5B_v5) <!-- at revision 221e30586ab5186c4360cbb7aeb643b6efc9d8f8 -->
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 1024 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: Qwen2Model
  (1): Pooling({'word_embedding_dimension': 1536, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Dense({'in_features': 1536, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.linear.Identity'})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("DrishtiSharma/stella_en_1.5B_v5-obliqa-5-epochs")
# Run inference
sentences = [
    'Are there any anticipated changes to the COBS Rule 17.3 / MIR Rule 3.2.1 that Authorised Persons should be preparing for in the near future? If so, what is the expected timeline for these changes to take effect?',
    'REGULATORY REQUIREMENTS FOR AUTHORISED PERSONS ENGAGED IN REGULATED ACTIVITIES IN RELATION TO VIRTUAL ASSETS\nCapital Requirements\nWhen applying COBS Rule 17.3 / MIR Rule 3.2.1 to an Authorised Person, the FSRA will apply proportionality in considering whether any additional capital buffer must be held, based on the size, scope, complexity and nature of the activities and operations of the Authorised Person and, if so, the appropriate amount of regulatory capital required as an additional buffer. An Authorised Person that the FSRA considers to be high risk may attract higher regulatory capital requirements.\n',
    'In exceptional circumstances, where the Bail-in Tool is applied, the Regulator may exclude or partially exclude certain liabilities from the application of the Write Down or Conversion Power where—\n(a)\tit is not possible to bail-in that liability within a reasonable time despite the reasonable efforts of the Regulator;\n(b)\tthe exclusion is strictly necessary and is proportionate to achieve the continuity of Critical Functions and Core Business Lines in a manner that maintains the ability of the Institution in Resolution to continue key operations, services and transactions;\n(c)\tthe exclusion is strictly necessary and proportionate to avoid giving rise to widespread contagion, in particular as regards Deposits and Eligible Deposits which would severely disrupt the functioning of financial markets, including financial market infrastructures, in a manner that could cause broader financial instability; or\n(d)\tthe application of the Bail-in Tool to those liabilities would cause a destruction of value such that the losses borne by other creditors would be higher than if those liabilities were excluded from bail-in.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 1024]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
+
|
381 |
+
<!--
|
382 |
+
### Direct Usage (Transformers)
|
383 |
+
|
384 |
+
<details><summary>Click to see the direct usage in Transformers</summary>
|
385 |
+
|
386 |
+
</details>
|
387 |
+
-->
|
388 |
+
|
389 |
+
<!--
|
390 |
+
### Downstream Usage (Sentence Transformers)
|
391 |
+
|
392 |
+
You can finetune this model on your own dataset.
|
393 |
+
|
394 |
+
<details><summary>Click to expand</summary>
|
395 |
+
|
396 |
+
</details>
|
397 |
+
-->
|
398 |
+
|
399 |
+
<!--
|
400 |
+
### Out-of-Scope Use
|
401 |
+
|
402 |
+
*List how the model may foreseeably be misused and address what users ought not to do with the model.*
|
403 |
+
-->
|
404 |
+
|
405 |
+
## Evaluation
|
406 |
+
|
407 |
+
### Metrics
|
408 |
+
|
409 |
+
#### Information Retrieval
|
410 |
+
|
411 |
+
* Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)
|
412 |
+
|
413 |
+
| Metric | Value |
|
414 |
+
|:--------------------|:-----------|
|
415 |
+
| cosine_accuracy@1 | 0.6234 |
|
416 |
+
| cosine_accuracy@3 | 0.7636 |
|
417 |
+
| cosine_accuracy@5 | 0.8113 |
|
418 |
+
| cosine_accuracy@10 | 0.8558 |
|
419 |
+
| cosine_precision@1 | 0.6234 |
|
420 |
+
| cosine_precision@3 | 0.2688 |
|
421 |
+
| cosine_precision@5 | 0.1757 |
|
422 |
+
| cosine_precision@10 | 0.0953 |
|
423 |
+
| cosine_recall@1 | 0.5458 |
|
424 |
+
| cosine_recall@3 | 0.6823 |
|
425 |
+
| cosine_recall@5 | 0.7314 |
|
426 |
+
| cosine_recall@10 | 0.7835 |
|
427 |
+
| cosine_ndcg@10 | 0.6893 |
|
428 |
+
| cosine_mrr@10 | 0.7027 |
|
429 |
+
| **cosine_map@100** | **0.6455** |
|
430 |
+
| dot_accuracy@1 | 0.3447 |
|
431 |
+
| dot_accuracy@3 | 0.5656 |
|
432 |
+
| dot_accuracy@5 | 0.6639 |
|
433 |
+
| dot_accuracy@10 | 0.7787 |
|
434 |
+
| dot_precision@1 | 0.3447 |
|
435 |
+
| dot_precision@3 | 0.1955 |
|
436 |
+
| dot_precision@5 | 0.1403 |
|
437 |
+
| dot_precision@10 | 0.0855 |
|
438 |
+
| dot_recall@1 | 0.3029 |
|
439 |
+
| dot_recall@3 | 0.5 |
|
440 |
+
| dot_recall@5 | 0.5915 |
|
441 |
+
| dot_recall@10 | 0.7071 |
|
442 |
+
| dot_ndcg@10 | 0.5127 |
|
443 |
+
| dot_mrr@10 | 0.4802 |
|
444 |
+
| dot_map@100 | 0.4464 |
|
445 |
+
|
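The table above was produced by the linked `InformationRetrievalEvaluator`, with `model` loaded as in the Usage section. The evaluation split itself is not part of this repository, so the query/corpus/relevance dictionaries below are placeholders that only show the expected input shape:

```python
from sentence_transformers.evaluation import InformationRetrievalEvaluator

# Placeholder data; the actual held-out queries and passages are not published here.
queries = {"q1": "What capital buffer applies under COBS Rule 17.3?"}
corpus = {
    "d1": "When applying COBS Rule 17.3 / MIR Rule 3.2.1 to an Authorised Person, ...",
    "d2": "Section 255 applies to a Third Party as it applies to the person ...",
}
relevant_docs = {"q1": {"d1"}}

evaluator = InformationRetrievalEvaluator(queries, corpus, relevant_docs)
results = evaluator(model)  # dict of accuracy@k, precision@k, recall@k, ndcg@10, mrr@10, map@100
```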
<!--
## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!--
### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 22,291 training samples
* Columns: <code>sentence_0</code> and <code>sentence_1</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence_0                                                                          | sentence_1                                                                            |
  |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|
  | type    | string                                                                              | string                                                                                |
  | details | <ul><li>min: 16 tokens</li><li>mean: 33.53 tokens</li><li>max: 71 tokens</li></ul> | <ul><li>min: 15 tokens</li><li>mean: 118.07 tokens</li><li>max: 512 tokens</li></ul> |
* Samples:
  | sentence_0 | sentence_1 |
  |:-----------|:-----------|
  | <code>What constitutes a "sufficiently advanced stage of development" for a FinTech Proposal to qualify for a live test under the RegLab framework, as mentioned in criterion (c)?</code> | <code>Evaluation Criteria. To qualify for authorisation under the RegLab framework, the applicant must demonstrate how it satisfies the following evaluation criteria:<br>(a) the FinTech Proposal promotes FinTech innovation, in terms of the business application and deployment model of the technology.<br>(b) the FinTech Proposal has the potential to:<br>i. promote significant growth, efficiency or competition in the financial sector;<br>ii. promote better risk management solutions and regulatory outcomes for the financial industry; or<br>iii. improve the choices and welfare of clients.<br>(c) the FinTech Proposal is at a sufficiently advanced stage of development to mount a live test.<br>(d) the FinTech Proposal can be deployed in the ADGM and the UAE on a broader scale or contribute to the development of ADGM as a financial centre, and, if so, how the applicant intends to do so on completion of the validity period.<br><br></code> |
  | <code>Are there any upcoming regulatory changes that Authorised Persons should be aware of regarding the handling or classification of Virtual Assets within the ADGM?</code> | <code>CONCEPTS RELATING TO THE DISCLOSURE OF PETROLEUM ACTIVITIES<br>Petroleum Projects and materiality<br>If a Petroleum Reporting Entity discloses estimates that it viewed as material at the time of disclosure, but subsequently forms a view that they are no longer material, the FSRA expects the Petroleum Reporting Entity to make a further disclosure providing the clear rationale for the change view on materiality. Such reasoning would generally follow the considerations outlined in paragraph 24 above.<br><br></code> |
  | <code>What are the ADGM's requirements for VC Managers regarding the periodic assessment and audit of their compliance frameworks, and who is qualified to conduct such assessments?</code> | <code>Principle 1 – A Robust and Transparent Risk-Based Regulatory Framework. The framework encompasses a suite of regulations, activity-specific rules and supporting guidance that delivers protection to investors, maintains market integrity and future-proofs against financial stability risks. In particular, it introduces a clear taxonomy defining VAs as commodities within the wider Digital Asset universe and requires the licensing of entities engaged in regulated activities that use VAs within ADGM.</code> |
* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
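
Taken together with the hyperparameters below, the fine-tuning setup can be sketched as follows; with `MultipleNegativesRankingLoss`, the other passages in each batch of 10 act as in-batch negatives for every question. This is a minimal reconstruction, not the exact training script: the placeholder pair stands in for the 22,291 real samples, and the step-based evaluation wiring is omitted.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("dunzhang/stella_en_1.5B_v5", trust_remote_code=True)
# Placeholder pair; the real dataset has 22,291 (sentence_0, sentence_1) rows.
train_dataset = Dataset.from_dict({
    "sentence_0": ["What capital buffer applies under COBS Rule 17.3?"],
    "sentence_1": ["When applying COBS Rule 17.3 / MIR Rule 3.2.1 to an Authorised Person, ..."],
})
loss = MultipleNegativesRankingLoss(model)  # defaults: scale=20.0, similarity_fct=cos_sim

args = SentenceTransformerTrainingArguments(
    output_dir="stella-obliqa-finetune",    # hypothetical output path
    num_train_epochs=3,
    per_device_train_batch_size=10,
    per_device_eval_batch_size=10,
)
trainer = SentenceTransformerTrainer(
    model=model, args=args, train_dataset=train_dataset, loss=loss
)
trainer.train()
```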

### Training Hyperparameters
#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `multi_dataset_batch_sampler`: round_robin

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 10
- `per_device_eval_batch_size`: 10
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 3
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>

### Training Logs
| Epoch  | Step | Training Loss | cosine_map@100 |
|:------:|:----:|:-------------:|:--------------:|
| 0.0897 | 200  | -             | 0.5597         |
| 0.1794 | 400  | -             | 0.5674         |
| 0.2242 | 500  | 0.7416        | -              |
| 0.2691 | 600  | -             | 0.4684         |
| 0.3587 | 800  | -             | 0.5593         |
| 0.4484 | 1000 | 0.6613        | 0.5502         |
| 0.5381 | 1200 | -             | 0.5740         |
| 0.6278 | 1400 | -             | 0.5398         |
| 0.6726 | 1500 | 0.5382        | -              |
| 0.7175 | 1600 | -             | 0.5820         |
| 0.8072 | 1800 | -             | 0.5770         |
| 0.8969 | 2000 | 0.4959        | 0.5834         |
| 0.9865 | 2200 | -             | 0.5382         |
| 1.0    | 2230 | -             | 0.3223         |
| 1.0762 | 2400 | -             | 0.5532         |
| 1.1211 | 2500 | 0.3796        | -              |
| 1.1659 | 2600 | -             | 0.5817         |
| 1.2556 | 2800 | -             | 0.5929         |
| 1.3453 | 3000 | 0.367         | 0.5937         |
| 1.4350 | 3200 | -             | 0.5907         |
| 1.5247 | 3400 | -             | 0.6024         |
| 1.5695 | 3500 | 0.2877        | -              |
| 1.6143 | 3600 | -             | 0.6006         |
| 1.7040 | 3800 | -             | 0.6131         |
| 1.7937 | 4000 | 0.2818        | 0.6167         |
| 1.8834 | 4200 | -             | 0.6040         |
| 1.9731 | 4400 | -             | 0.6144         |
| 2.0    | 4460 | -             | 0.6225         |
| 2.0179 | 4500 | 0.2529        | -              |
| 2.0628 | 4600 | -             | 0.6196         |
| 2.1525 | 4800 | -             | 0.6222         |
| 2.2422 | 5000 | 0.1409        | 0.6278         |
| 2.3318 | 5200 | -             | 0.6337         |
| 2.4215 | 5400 | -             | 0.6409         |
| 2.4664 | 5500 | 0.1213        | -              |
| 2.5112 | 5600 | -             | 0.6424         |
| 2.6009 | 5800 | -             | 0.6412         |
| 2.6906 | 6000 | 0.1218        | 0.6432         |
| 2.7803 | 6200 | -             | 0.6456         |
| 2.8700 | 6400 | -             | 0.6446         |
| 2.9148 | 6500 | 0.1247        | -              |
| 2.9596 | 6600 | -             | 0.6458         |
| 3.0    | 6690 | -             | 0.6455         |

### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.1.1
- Transformers: 4.45.2
- PyTorch: 2.1.0+cu118
- Accelerate: 1.2.0.dev0
- Datasets: 3.1.0
- Tokenizers: 0.20.3

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
added_tokens.json ADDED
@@ -0,0 +1,5 @@
{
  "<|endoftext|>": 151643,
  "<|im_end|>": 151645,
  "<|im_start|>": 151644
}
config.json ADDED
@@ -0,0 +1,34 @@
{
  "_name_or_path": "dunzhang/stella_en_1.5B_v5",
  "architectures": [
    "Qwen2Model"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoModel": "dunzhang/stella_en_1.5B_v5--modeling_qwen.Qwen2Model",
    "AutoModelForCausalLM": "dunzhang/stella_en_1.5B_v5--modeling_qwen.Qwen2ForCausalLM",
    "AutoModelForSequenceClassification": "dunzhang/stella_en_1.5B_v5--modeling_qwen.Qwen2ForSequenceClassification"
  },
  "bos_token_id": 151643,
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 1536,
  "initializer_range": 0.02,
  "intermediate_size": 8960,
  "max_position_embeddings": 131072,
  "max_window_layers": 21,
  "model_type": "qwen2",
  "num_attention_heads": 12,
  "num_hidden_layers": 28,
  "num_key_value_heads": 2,
  "rms_norm_eps": 1e-06,
  "rope_scaling": null,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.45.2",
  "use_cache": true,
  "use_sliding_window": false,
  "vocab_size": 151646
}
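Note that `auto_map` points at custom modeling code hosted in the base `dunzhang/stella_en_1.5B_v5` repository, so loading the raw backbone through `transformers` (rather than through sentence-transformers) appears to require `trust_remote_code` (a sketch, assuming the remote code path resolves as configured):

```python
from transformers import AutoModel

backbone = AutoModel.from_pretrained(
    "DrishtiSharma/stella_en_1.5B_v5-obliqa-5-epochs",
    trust_remote_code=True,  # executes dunzhang/stella_en_1.5B_v5--modeling_qwen.Qwen2Model
)
```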
config_sentence_transformers.json ADDED
@@ -0,0 +1,13 @@
{
  "__version__": {
    "sentence_transformers": "3.1.1",
    "transformers": "4.45.2",
    "pytorch": "2.1.0+cu118"
  },
  "prompts": {
    "s2p_query": "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: ",
    "s2s_query": "Instruct: Retrieve semantically similar text.\nQuery: "
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
merges.txt ADDED
The diff for this file is too large to render. See raw diff.
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ad749bd5b680f1711c07242a0f6fb58dc7f9e382a88b7f1fc7e9109686884da
size 4994887136
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3cc402883b4d5edb25db49a88cc9d4def40750a2f1151935c5322ec1111a68f2
size 1178224504
model.safetensors.index.json ADDED
@@ -0,0 +1,345 @@
{
  "metadata": {
    "total_size": 6173075456
  },
  "weight_map": {
    "embed_tokens.weight": "model-00001-of-00002.safetensors",
    "layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
    "layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
    "layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
153 |
+
"layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
154 |
+
"layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
155 |
+
"layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
156 |
+
"layers.2.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
157 |
+
"layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
158 |
+
"layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
159 |
+
"layers.2.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
160 |
+
"layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
161 |
+
"layers.2.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
162 |
+
"layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
163 |
+
"layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
164 |
+
"layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
165 |
+
"layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
166 |
+
"layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
167 |
+
"layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
168 |
+
"layers.20.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
169 |
+
"layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
170 |
+
"layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
171 |
+
"layers.20.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
172 |
+
"layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
173 |
+
"layers.20.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
174 |
+
"layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
175 |
+
"layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
176 |
+
"layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
177 |
+
"layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
178 |
+
"layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
179 |
+
"layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
180 |
+
"layers.21.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
181 |
+
"layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
182 |
+
"layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
183 |
+
"layers.21.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
184 |
+
"layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
185 |
+
"layers.21.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
186 |
+
"layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
187 |
+
"layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
188 |
+
"layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
189 |
+
"layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
190 |
+
"layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
191 |
+
"layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
192 |
+
"layers.22.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
193 |
+
"layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
194 |
+
"layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
195 |
+
"layers.22.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
196 |
+
"layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
197 |
+
"layers.22.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
198 |
+
"layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
199 |
+
"layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
200 |
+
"layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
201 |
+
"layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
202 |
+
"layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
203 |
+
"layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
204 |
+
"layers.23.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
205 |
+
"layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
206 |
+
"layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
207 |
+
"layers.23.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
208 |
+
"layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
209 |
+
"layers.23.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
210 |
+
"layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
211 |
+
"layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
212 |
+
"layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
213 |
+
"layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
214 |
+
"layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
215 |
+
"layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
216 |
+
"layers.24.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
217 |
+
"layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
218 |
+
"layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
219 |
+
"layers.24.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
220 |
+
"layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
221 |
+
"layers.24.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
222 |
+
"layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
223 |
+
"layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
224 |
+
"layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
225 |
+
"layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
226 |
+
"layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
227 |
+
"layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
228 |
+
"layers.25.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
229 |
+
"layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
230 |
+
"layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
231 |
+
"layers.25.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
232 |
+
"layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
233 |
+
"layers.25.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
234 |
+
"layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
235 |
+
"layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
236 |
+
"layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
237 |
+
"layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
238 |
+
"layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
239 |
+
"layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
240 |
+
"layers.26.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
241 |
+
"layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
242 |
+
"layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
243 |
+
"layers.26.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
244 |
+
"layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
245 |
+
"layers.26.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
246 |
+
"layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
247 |
+
"layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
248 |
+
"layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
249 |
+
"layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
|
250 |
+
"layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
|
251 |
+
"layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
252 |
+
"layers.27.self_attn.k_proj.bias": "model-00002-of-00002.safetensors",
|
253 |
+
"layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
|
254 |
+
"layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
255 |
+
"layers.27.self_attn.q_proj.bias": "model-00002-of-00002.safetensors",
|
256 |
+
"layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
|
257 |
+
"layers.27.self_attn.v_proj.bias": "model-00002-of-00002.safetensors",
|
258 |
+
"layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
|
259 |
+
"layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
260 |
+
"layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
261 |
+
"layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
262 |
+
"layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
263 |
+
"layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
264 |
+
"layers.3.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
265 |
+
"layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
266 |
+
"layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
267 |
+
"layers.3.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
268 |
+
"layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
269 |
+
"layers.3.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
270 |
+
"layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
271 |
+
"layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
272 |
+
"layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
273 |
+
"layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
274 |
+
"layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
275 |
+
"layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
276 |
+
"layers.4.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
277 |
+
"layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
278 |
+
"layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
279 |
+
"layers.4.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
280 |
+
"layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
281 |
+
"layers.4.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
282 |
+
"layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
283 |
+
"layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
284 |
+
"layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
285 |
+
"layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
286 |
+
"layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
287 |
+
"layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
288 |
+
"layers.5.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
289 |
+
"layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
290 |
+
"layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
291 |
+
"layers.5.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
292 |
+
"layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
293 |
+
"layers.5.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
294 |
+
"layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
295 |
+
"layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
296 |
+
"layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
297 |
+
"layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
298 |
+
"layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
299 |
+
"layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
300 |
+
"layers.6.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
301 |
+
"layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
302 |
+
"layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
303 |
+
"layers.6.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
304 |
+
"layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
305 |
+
"layers.6.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
306 |
+
"layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
307 |
+
"layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
308 |
+
"layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
309 |
+
"layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
310 |
+
"layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
311 |
+
"layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
312 |
+
"layers.7.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
313 |
+
"layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
314 |
+
"layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
315 |
+
"layers.7.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
316 |
+
"layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
317 |
+
"layers.7.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
318 |
+
"layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
319 |
+
"layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
320 |
+
"layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
321 |
+
"layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
322 |
+
"layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
323 |
+
"layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
324 |
+
"layers.8.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
325 |
+
"layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
326 |
+
"layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
327 |
+
"layers.8.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
328 |
+
"layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
329 |
+
"layers.8.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
330 |
+
"layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
331 |
+
"layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
332 |
+
"layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
333 |
+
"layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
|
334 |
+
"layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
|
335 |
+
"layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
336 |
+
"layers.9.self_attn.k_proj.bias": "model-00001-of-00002.safetensors",
|
337 |
+
"layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
|
338 |
+
"layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
339 |
+
"layers.9.self_attn.q_proj.bias": "model-00001-of-00002.safetensors",
|
340 |
+
"layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
|
341 |
+
"layers.9.self_attn.v_proj.bias": "model-00001-of-00002.safetensors",
|
342 |
+
"layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
|
343 |
+
"norm.weight": "model-00002-of-00002.safetensors"
|
344 |
+
}
|
345 |
+
}
|
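For orientation, the weight_map above is the lookup table that sharded safetensors loading consults: each tensor name maps to the shard file that stores it (layers 2-20 sit in shard 1, layers 22-27 and the final norm in shard 2, with layer 21 split across both). Below is a minimal sketch of resolving a tensor by hand; model_dir is an assumed local checkout of this repository.

import json
from safetensors import safe_open

model_dir = "."  # hypothetical path where the shards above were downloaded

# The index file maps each tensor name to its shard.
with open(f"{model_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

shard_file = index["weight_map"]["layers.13.mlp.up_proj.weight"]
# -> "model-00001-of-00002.safetensors", per the map above

# Open only that shard and read the single tensor.
with safe_open(f"{model_dir}/{shard_file}", framework="pt") as shard:
    tensor = shard.get_tensor("layers.13.mlp.up_proj.weight")
print(tensor.shape)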
modules.json
ADDED
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Dense",
    "type": "sentence_transformers.models.Dense"
  }
]
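modules.json is what Sentence Transformers reads to assemble the inference pipeline: module 0 is the Transformer backbone at the repo root, module 1 the Pooling layer configured in 1_Pooling, and module 2 the Dense projection in 2_Dense. A minimal usage sketch follows; the repo id is a placeholder, not this repository's actual id.

from sentence_transformers import SentenceTransformer

# Placeholder repo id; substitute the id of this repository.
# trust_remote_code=True is assumed necessary because the tokenizer's
# auto_map points at custom code (see tokenizer_config.json below).
model = SentenceTransformer("DrishtiSharma/<this-model>", trust_remote_code=True)

# encode() runs the three modules in order: Transformer -> Pooling -> Dense.
embeddings = model.encode(["An example sentence."])
print(embeddings.shape)  # one vector per input; the 2_Dense module sets the final dimensionality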
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 512,
  "do_lower_case": false
}
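sentence_bert_config.json caps inputs at 512 tokens (with casing preserved, since do_lower_case is false); longer texts are truncated at encode time. The cap is exposed as a mutable attribute after loading, sketched here with the same placeholder repo id as above.

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("DrishtiSharma/<this-model>", trust_remote_code=True)  # placeholder id
print(model.max_seq_length)  # 512, from sentence_bert_config.json
model.max_seq_length = 256   # lowering is safe; raising past the backbone's configured limit is not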
special_tokens_map.json
ADDED
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9f198d74f8167446195840e984e047a0336be641303805c5bb11e25d9ffbe90
size 11419303
tokenizer_config.json
ADDED
@@ -0,0 +1,50 @@
{
  "add_eos_token": true,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "auto_map": {
    "AutoTokenizer": [
      "dunzhang/stella_en_1.5B_v5--tokenization_qwen.Qwen2Tokenizer",
      "dunzhang/stella_en_1.5B_v5--tokenization_qwen.Qwen2TokenizerFast"
    ]
  },
  "bos_token": null,
  "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 512,
  "pad_token": "<|endoftext|>",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
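The auto_map entry above routes AutoTokenizer through custom Qwen2 tokenizer code hosted in dunzhang/stella_en_1.5B_v5, so loading requires trust_remote_code=True; add_eos_token appends <|endoftext|> to encoded sequences and model_max_length mirrors the 512-token cap. A sketch, again with a placeholder repo id:

from transformers import AutoTokenizer

# Placeholder repo id; trust_remote_code=True lets transformers fetch the
# Qwen2Tokenizer classes named in auto_map above.
tok = AutoTokenizer.from_pretrained("DrishtiSharma/<this-model>", trust_remote_code=True)

ids = tok("An example sentence.")["input_ids"]
print(ids[-1] == tok.eos_token_id)  # True if add_eos_token is honored: <|endoftext|> (id 151643) is appended
print(tok.model_max_length)         # 512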
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff