bananabot committed on
Commit
51d9b29
·
1 Parent(s): 67e22f5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -25
app.py CHANGED
@@ -6,36 +6,38 @@ from gradio.mix import Parallel, Series
6
  #import torch.nn.functional as F
7
  from aitextgen import aitextgen
8
 
9
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
10
 
11
- from datasets import load_dataset
12
- dataset = load_dataset("bananabot/engMollywoodSummaries")
13
- tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
14
 
15
- tokenizer.pad_token = tokenizer.eos_token
 
 
16
 
17
- def tokenize_function(examples):
18
- return tokenizer(examples["text"], padding="max_length", truncation=True)
19
- tokenized_datasets = dataset.map(tokenize_function, batched=True)
20
 
21
- model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B").to(device)
22
- training_args = TrainingArguments(output_dir="test_trainer")
23
- small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
24
- small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))
25
- def compute_metrics(eval_pred):
26
- logits, labels = eval_pred
27
- predictions = np.argmax(logits, axis=-1)
28
- return metric.compute(predictions=predictions, references=labels)
29
 
30
- trainer = Trainer(
31
- model=model,
32
- args=training_args,
33
- train_dataset=small_train_dataset,
34
- eval_dataset=small_eval_dataset,
35
- compute_metrics=compute_metrics,
36
- )
 
37
 
38
- trainer.train()
 
 
 
 
 
 
 
 
39
 
40
 
41
 
@@ -59,7 +61,7 @@ ai = aitextgen(model="EleutherAI/gpt-neo-1.3B")
59
  # print (output)
60
 
61
  def ai_text(inp):
62
- generated_text = ai.generate_one(max_length=123, prompt = inp, no_repeat_ngram_size=3, num_beams=5, do_sample=True, temperature=1.37, top_k=69, top_p=0.96)
63
  print(type(generated_text))
64
  return generated_text
65
 
 
6
  #import torch.nn.functional as F
7
  from aitextgen import aitextgen
8
 
9
+ # is fine-tuning worth it?
10
 
11
+ #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 
12
 
13
+ #from datasets import load_dataset
14
+ #dataset = load_dataset("bananabot/engMollywoodSummaries")
15
+ #tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
16
 
17
+ #tokenizer.pad_token = tokenizer.eos_token
 
 
18
 
19
+ #def tokenize_function(examples):
20
+ # return tokenizer(examples["text"], padding="max_length", truncation=True)
21
+ #tokenized_datasets = dataset.map(tokenize_function, batched=True)
 
 
 
 
 
22
 
23
+ #model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B").to(device)
24
+ #training_args = TrainingArguments(output_dir="test_trainer")
25
+ #small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
26
+ #small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000))
27
+ #def compute_metrics(eval_pred):
28
+ # logits, labels = eval_pred
29
+ # predictions = np.argmax(logits, axis=-1)
30
+ # return metric.compute(predictions=predictions, references=labels)
31
 
32
+ #trainer = Trainer(
33
+ # model=model,
34
+ # args=training_args,
35
+ # train_dataset=small_train_dataset,
36
+ # eval_dataset=small_eval_dataset,
37
+ # compute_metrics=compute_metrics,
38
+ #)
39
+
40
+ #trainer.train()
41
 
42
 
43
 
 
61
  # print (output)
62
 
63
  def ai_text(inp):
64
+ generated_text = ai.generate_one(max_length=333, prompt = inp, no_repeat_ngram_size=3, num_beams=7, do_sample=True, temperature=1.37, top_k=69, top_p=0.96)
65
  print(type(generated_text))
66
  return generated_text
67