Spaces: Running on Zero
carlosep93 committed · Commit ac24b2e · 1 Parent(s): 51a8540

added accelerate support

Files changed:
- app.py: +3 -6
- requirements.txt: +2 -1
app.py CHANGED
@@ -8,11 +8,8 @@ model_id = "BSC-LT/salamandraTA-2b"
 
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id)
-
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
 # Move model to GPU if available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
 languages = [ "Spanish", "Catalan", "English", "French", "German", "Italian", "Portuguese", "Euskera", "Galician",
               "Bulgarian", "Czech", "Lithuanian", "Croatian", "Dutch", "Romanian", "Danish", "Greek", "Finnish",
               "Hungarian", "Slovak", "Slovenian", "Estonian", "Polish", "Latvian", "Swedish", "Maltese",
@@ -21,14 +18,14 @@ languages = [ "Spanish", "Catalan", "English", "French", "German", "Italian", "P
 example_sentence = ["Ahir se'n va anar, va agafar les seves coses i es va posar a navegar."]
 
 
-@spaces.GPU
+@spaces.GPU(duration=120)
 def translate(input_text, source, target):
     sentences = input_text.split('\n')
     generated_text = []
     for sentence in sentences:
         prompt = f'[{source}] {sentence} \n[{target}]'
 
-        input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
+        input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(model.device)
         output_ids = model.generate(input_ids, max_length=500, num_beams=5)
         input_length = input_ids.shape[1]
 
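Taken together, the app.py changes swap the manual device handling for accelerate-driven placement: device_map="auto" in from_pretrained lets accelerate decide where the fp16 weights live, the ZeroGPU decorator now requests up to 120 seconds of GPU time per call, and inputs are sent to model.device rather than a hand-managed device variable. A minimal sketch of the resulting loading and translation path follows; the Gradio wiring and the decode/return step after input_length are not part of this diff and are filled in here only as assumptions.

# Sketch of the post-commit pattern in app.py (assumes accelerate is installed, which
# device_map="auto" requires; Gradio wiring is omitted, and the decode/return step is
# an assumed continuation since the diff cuts off at input_length).
import torch
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "BSC-LT/salamandraTA-2b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# accelerate places the fp16 weights, so the old torch.device(...) / model.to(device)
# lines are no longer needed.
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

@spaces.GPU(duration=120)  # ZeroGPU: request a GPU for up to 120 s per call
def translate(input_text, source, target):
    sentences = input_text.split('\n')
    generated_text = []
    for sentence in sentences:
        prompt = f'[{source}] {sentence} \n[{target}]'
        # Inputs follow wherever accelerate put the model, instead of a hand-managed device.
        input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(model.device)
        output_ids = model.generate(input_ids, max_length=500, num_beams=5)
        input_length = input_ids.shape[1]
        # Assumed continuation: strip the prompt tokens and decode only the completion.
        generated_text.append(
            tokenizer.decode(output_ids[0, input_length:], skip_special_tokens=True)
        )
    return '\n'.join(generated_text)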
requirements.txt CHANGED
@@ -2,4 +2,5 @@ torch
 transformers==4.46.2
 gradio==5.5.0
 protobuf==5.28.3
-sentencepiece==0.2.0
+sentencepiece==0.2.0
+accelerate==1.0.1
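requirements.txt now pins accelerate==1.0.1, which transformers needs before it will accept a device_map argument; without it the from_pretrained call above fails with a request to install accelerate. A quick, hypothetical sanity check for the Space's environment (not part of the repo) could look like:

# Hypothetical environment check, not part of the Space: verifies the pinned
# packages import cleanly and that a GPU is visible when one is attached.
import accelerate
import torch
import transformers

print("accelerate", accelerate.__version__)      # expected 1.0.1 per requirements.txt
print("transformers", transformers.__version__)  # expected 4.46.2 per requirements.txt
print("cuda available:", torch.cuda.is_available())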