Ubuntu committed on
Commit
4d7573b
0 Parent(s):
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +4 -0
  2. .gitignore +1 -0
  3. __pycache__/keys.cpython-310.pyc +0 -0
  4. data/AI_checker_remade.csv +3 -0
  5. data/new_data_with_embeddings.csv +3 -0
  6. data/original_data.csv +3 -0
  7. models/tokenizer_v1/added_tokens.json +7 -0
  8. models/tokenizer_v1/special_tokens_map.json +7 -0
  9. models/tokenizer_v1/tokenizer.json +0 -0
  10. models/tokenizer_v1/tokenizer_config.json +56 -0
  11. models/tokenizer_v1/vocab.txt +0 -0
  12. models/trained_model_v1/config.json +33 -0
  13. models/trained_model_v1/pytorch_model.bin +3 -0
  14. models/trained_model_v11/added_tokens.json +7 -0
  15. models/trained_model_v11/config.json +33 -0
  16. models/trained_model_v11/pytorch_model.bin +3 -0
  17. models/trained_model_v11/special_tokens_map.json +7 -0
  18. models/trained_model_v11/tokenizer.json +0 -0
  19. models/trained_model_v11/tokenizer_config.json +56 -0
  20. models/trained_model_v11/vocab.txt +0 -0
  21. my_awesome_model/checkpoint-15000/added_tokens.json +7 -0
  22. my_awesome_model/checkpoint-15000/config.json +33 -0
  23. my_awesome_model/checkpoint-15000/optimizer.pt +3 -0
  24. my_awesome_model/checkpoint-15000/pytorch_model.bin +3 -0
  25. my_awesome_model/checkpoint-15000/rng_state.pth +0 -0
  26. my_awesome_model/checkpoint-15000/scheduler.pt +3 -0
  27. my_awesome_model/checkpoint-15000/special_tokens_map.json +7 -0
  28. my_awesome_model/checkpoint-15000/tokenizer.json +0 -0
  29. my_awesome_model/checkpoint-15000/tokenizer_config.json +56 -0
  30. my_awesome_model/checkpoint-15000/trainer_state.json +208 -0
  31. my_awesome_model/checkpoint-15000/training_args.bin +3 -0
  32. my_awesome_model/checkpoint-15000/vocab.txt +0 -0
  33. my_awesome_model/checkpoint-30000/added_tokens.json +7 -0
  34. my_awesome_model/checkpoint-30000/config.json +33 -0
  35. my_awesome_model/checkpoint-30000/optimizer.pt +3 -0
  36. my_awesome_model/checkpoint-30000/pytorch_model.bin +3 -0
  37. my_awesome_model/checkpoint-30000/rng_state.pth +0 -0
  38. my_awesome_model/checkpoint-30000/scheduler.pt +3 -0
  39. my_awesome_model/checkpoint-30000/special_tokens_map.json +7 -0
  40. my_awesome_model/checkpoint-30000/tokenizer.json +0 -0
  41. my_awesome_model/checkpoint-30000/tokenizer_config.json +56 -0
  42. my_awesome_model/checkpoint-30000/trainer_state.json +397 -0
  43. my_awesome_model/checkpoint-30000/training_args.bin +3 -0
  44. my_awesome_model/checkpoint-30000/vocab.txt +0 -0
  45. my_awesome_model/runs/Oct06_09-35-53_ip-172-31-95-165/events.out.tfevents.1696584953.ip-172-31-95-165.4302.0 +0 -0
  46. my_awesome_model/runs/Oct06_10-12-41_ip-172-31-95-165/events.out.tfevents.1696587161.ip-172-31-95-165.5338.0 +0 -0
  47. requirements.txt +11 -0
  48. research/01_embeddings_Similarity.html +0 -0
  49. research/01_embeddings_Similarity.ipynb +1249 -0
  50. research/02_dl_Ai_checker.ipynb +1220 -0
.gitattributes ADDED
@@ -0,0 +1,4 @@
+ *.csv filter=lfs diff=lfs merge=lfs -text
+ *bin filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
+ keys.py
__pycache__/keys.cpython-310.pyc ADDED
Binary file (619 Bytes).
 
data/AI_checker_remade.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bdba3d2700937b2fe571b5903a76d3c509386fef4b91ae86fe54fae0a3cdd919
+ size 301323861
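
Note: each of the data/*.csv entries in this commit is a Git LFS pointer, not the CSV itself; the repository stores only a version line, the sha256 oid of the real blob, and its size in bytes. As a minimal illustrative sketch (not part of this commit), such a pointer can be detected and parsed in Python:

import os

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into its key/value fields, or return {} if it is not one."""
    # Real LFS pointers are tiny; anything large is already the actual content.
    if os.path.getsize(path) > 1024:
        return {}
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    # A well-formed pointer starts with the LFS spec URL and carries oid and size.
    if fields.get("version", "").startswith("https://git-lfs.github.com/spec/"):
        return fields
    return {}

# Example (run from the repository root):
# parse_lfs_pointer("data/AI_checker_remade.csv")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:bdba...', 'size': '301323861'}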
data/new_data_with_embeddings.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c437f101ec9aec5244cd6bd431f486ff8e6b9ecf66db25a915f59f970cdb6fbd
+ size 305301653
data/original_data.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cfc33da6a7155466097cbe949c4c50b15b8cb25ac115e801fbe1ef7eb3ac61e
+ size 443537732
models/tokenizer_v1/added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 101,
+   "[MASK]": 103,
+   "[PAD]": 0,
+   "[SEP]": 102,
+   "[UNK]": 100
+ }
models/tokenizer_v1/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
models/tokenizer_v1/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/tokenizer_v1/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
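
Note: models/tokenizer_v1/ now holds a complete DistilBertTokenizer bundle (vocab.txt, tokenizer.json, the special-token maps, and the config above), so it should load directly through the standard transformers API. A minimal sketch, assuming the repository root as the working directory:

from transformers import AutoTokenizer

# Load the tokenizer saved under models/tokenizer_v1 in this commit.
tokenizer = AutoTokenizer.from_pretrained("models/tokenizer_v1")

# Sanity check against the config above: lowercasing, [CLS]/[SEP] wrapping, 512-token limit.
enc = tokenizer("Hello World", truncation=True, max_length=512)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
# expected: ['[CLS]', 'hello', 'world', '[SEP]']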
models/tokenizer_v1/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/trained_model_v1/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NEGATIVE",
+     "1": "POSITIVE"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "NEGATIVE": 0,
+     "POSITIVE": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "vocab_size": 30522
+ }
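
Note: per this config, trained_model_v1 is a two-label DistilBertForSequenceClassification fine-tuned from distilbert-base-uncased. A hedged inference sketch follows; pairing it with models/tokenizer_v1 is an assumption, since the commit does not state which tokenizer belongs to this checkpoint:

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumed pairing: the tokenizer directory added separately in this commit.
tokenizer = AutoTokenizer.from_pretrained("models/tokenizer_v1")
model = AutoModelForSequenceClassification.from_pretrained("models/trained_model_v1")
model.eval()

inputs = tokenizer("Some text to score", return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
# Map the winning logit back through the id2label table above.
print(model.config.id2label[logits.argmax(dim=-1).item()])  # "NEGATIVE" or "POSITIVE"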
models/trained_model_v1/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:502361f57902b8792424d4c98cf37fc9dfd6a2ea41b2bf3d41346eb1ff0b658b
+ size 267855978
models/trained_model_v11/added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 101,
+   "[MASK]": 103,
+   "[PAD]": 0,
+   "[SEP]": 102,
+   "[UNK]": 100
+ }
models/trained_model_v11/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NEGATIVE",
+     "1": "POSITIVE"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "NEGATIVE": 0,
+     "POSITIVE": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "vocab_size": 30522
+ }
models/trained_model_v11/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:502361f57902b8792424d4c98cf37fc9dfd6a2ea41b2bf3d41346eb1ff0b658b
+ size 267855978
models/trained_model_v11/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
models/trained_model_v11/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
models/trained_model_v11/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
models/trained_model_v11/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_model/checkpoint-15000/added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 101,
+   "[MASK]": 103,
+   "[PAD]": 0,
+   "[SEP]": 102,
+   "[UNK]": 100
+ }
my_awesome_model/checkpoint-15000/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NEGATIVE",
+     "1": "POSITIVE"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "NEGATIVE": 0,
+     "POSITIVE": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "vocab_size": 30522
+ }
my_awesome_model/checkpoint-15000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a469da14025142fa0c4f7f87a31bab3d92a4d6153b096d099432e5ec32a1af3d
+ size 535727290
my_awesome_model/checkpoint-15000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cf74e93c7d98e4d97620113e2ff7b00ff69f3d4ab048b2fbe444e1f4ca08d3c
+ size 267855978
my_awesome_model/checkpoint-15000/rng_state.pth ADDED
Binary file (14.2 kB).
 
my_awesome_model/checkpoint-15000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0791ab28ed85fc2b7bf19fb7882ce2711b9dea36e12032576612122561cfd5f1
+ size 1064
my_awesome_model/checkpoint-15000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
my_awesome_model/checkpoint-15000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_model/checkpoint-15000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
my_awesome_model/checkpoint-15000/trainer_state.json ADDED
@@ -0,0 +1,208 @@
+ {
+   "best_metric": 0.07496260851621628,
+   "best_model_checkpoint": "my_awesome_model/checkpoint-15000",
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 15000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.9666666666666666e-05,
+       "loss": 0.1446,
+       "step": 500
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.9333333333333333e-05,
+       "loss": 0.0898,
+       "step": 1000
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.9e-05,
+       "loss": 0.0545,
+       "step": 1500
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 1.866666666666667e-05,
+       "loss": 0.0487,
+       "step": 2000
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.8333333333333333e-05,
+       "loss": 0.0444,
+       "step": 2500
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.8e-05,
+       "loss": 0.0443,
+       "step": 3000
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 1.7666666666666668e-05,
+       "loss": 0.0439,
+       "step": 3500
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.7333333333333336e-05,
+       "loss": 0.0291,
+       "step": 4000
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.7e-05,
+       "loss": 0.0342,
+       "step": 4500
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.0332,
+       "step": 5000
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.6333333333333335e-05,
+       "loss": 0.0345,
+       "step": 5500
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.0232,
+       "step": 6000
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 1.5666666666666667e-05,
+       "loss": 0.0266,
+       "step": 6500
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 1.5333333333333334e-05,
+       "loss": 0.0276,
+       "step": 7000
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 1.5000000000000002e-05,
+       "loss": 0.0303,
+       "step": 7500
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 1.4666666666666666e-05,
+       "loss": 0.0254,
+       "step": 8000
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 1.4333333333333334e-05,
+       "loss": 0.0214,
+       "step": 8500
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 1.4e-05,
+       "loss": 0.0206,
+       "step": 9000
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.3666666666666667e-05,
+       "loss": 0.0258,
+       "step": 9500
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 1.3333333333333333e-05,
+       "loss": 0.0179,
+       "step": 10000
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 1.3000000000000001e-05,
+       "loss": 0.0184,
+       "step": 10500
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 1.2666666666666667e-05,
+       "loss": 0.0221,
+       "step": 11000
+     },
+     {
+       "epoch": 0.77,
+       "learning_rate": 1.2333333333333334e-05,
+       "loss": 0.0187,
+       "step": 11500
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 1.2e-05,
+       "loss": 0.0174,
+       "step": 12000
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 1.1666666666666668e-05,
+       "loss": 0.0181,
+       "step": 12500
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 1.1333333333333334e-05,
+       "loss": 0.0176,
+       "step": 13000
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 1.1000000000000001e-05,
+       "loss": 0.0171,
+       "step": 13500
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 1.0666666666666667e-05,
+       "loss": 0.0149,
+       "step": 14000
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 1.0333333333333335e-05,
+       "loss": 0.0136,
+       "step": 14500
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 1e-05,
+       "loss": 0.0178,
+       "step": 15000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9843833333333334,
+       "eval_loss": 0.07496260851621628,
+       "eval_runtime": 200.0448,
+       "eval_samples_per_second": 299.933,
+       "eval_steps_per_second": 18.746,
+       "step": 15000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 30000,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "total_flos": 2.27844960589872e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
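
Note: because trainer_state.json records the full log_history, the loss curve and best checkpoint can be read back without TensorBoard. A small sketch using only the standard library (file path as added in this commit):

import json

with open("my_awesome_model/checkpoint-15000/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; the evaluation entry carries "eval_*" keys instead.
train_log = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(train_log[0], train_log[-1])     # (500, 0.1446) ... (15000, 0.0178)
print(state["best_metric"])            # 0.07496260851621628, i.e. the eval loss above
print(state["best_model_checkpoint"])  # my_awesome_model/checkpoint-15000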
my_awesome_model/checkpoint-15000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c46d094ebc25b6e4ba42d7d833d3a46a5a4a4755e4e22404dd4492caa24ef3
+ size 4536
my_awesome_model/checkpoint-15000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_model/checkpoint-30000/added_tokens.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "[CLS]": 101,
+   "[MASK]": 103,
+   "[PAD]": 0,
+   "[SEP]": 102,
+   "[UNK]": 100
+ }
my_awesome_model/checkpoint-30000/config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NEGATIVE",
+     "1": "POSITIVE"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "NEGATIVE": 0,
+     "POSITIVE": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "vocab_size": 30522
+ }
my_awesome_model/checkpoint-30000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5ff325f5bc38ed6bb344bb939c32826b951890c550a38a3ab6ab2f77d1e5836
+ size 535727290
my_awesome_model/checkpoint-30000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:502361f57902b8792424d4c98cf37fc9dfd6a2ea41b2bf3d41346eb1ff0b658b
+ size 267855978
my_awesome_model/checkpoint-30000/rng_state.pth ADDED
Binary file (14.2 kB).
 
my_awesome_model/checkpoint-30000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b36093e06845c6146f3175c64f0e8bdb441d4f7fc67a6962ed0b80b6725daf1
+ size 1064
my_awesome_model/checkpoint-30000/special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
my_awesome_model/checkpoint-30000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_model/checkpoint-30000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
my_awesome_model/checkpoint-30000/trainer_state.json ADDED
@@ -0,0 +1,397 @@
+ {
+   "best_metric": 0.03921842947602272,
+   "best_model_checkpoint": "my_awesome_model/checkpoint-30000",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 30000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 1.9666666666666666e-05,
+       "loss": 0.1446,
+       "step": 500
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 1.9333333333333333e-05,
+       "loss": 0.0898,
+       "step": 1000
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 1.9e-05,
+       "loss": 0.0545,
+       "step": 1500
+     },
+     {
+       "epoch": 0.13,
+       "learning_rate": 1.866666666666667e-05,
+       "loss": 0.0487,
+       "step": 2000
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 1.8333333333333333e-05,
+       "loss": 0.0444,
+       "step": 2500
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 1.8e-05,
+       "loss": 0.0443,
+       "step": 3000
+     },
+     {
+       "epoch": 0.23,
+       "learning_rate": 1.7666666666666668e-05,
+       "loss": 0.0439,
+       "step": 3500
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 1.7333333333333336e-05,
+       "loss": 0.0291,
+       "step": 4000
+     },
+     {
+       "epoch": 0.3,
+       "learning_rate": 1.7e-05,
+       "loss": 0.0342,
+       "step": 4500
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.0332,
+       "step": 5000
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 1.6333333333333335e-05,
+       "loss": 0.0345,
+       "step": 5500
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.0232,
+       "step": 6000
+     },
+     {
+       "epoch": 0.43,
+       "learning_rate": 1.5666666666666667e-05,
+       "loss": 0.0266,
+       "step": 6500
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 1.5333333333333334e-05,
+       "loss": 0.0276,
+       "step": 7000
+     },
+     {
+       "epoch": 0.5,
+       "learning_rate": 1.5000000000000002e-05,
+       "loss": 0.0303,
+       "step": 7500
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 1.4666666666666666e-05,
+       "loss": 0.0254,
+       "step": 8000
+     },
+     {
+       "epoch": 0.57,
+       "learning_rate": 1.4333333333333334e-05,
+       "loss": 0.0214,
+       "step": 8500
+     },
+     {
+       "epoch": 0.6,
+       "learning_rate": 1.4e-05,
+       "loss": 0.0206,
+       "step": 9000
+     },
+     {
+       "epoch": 0.63,
+       "learning_rate": 1.3666666666666667e-05,
+       "loss": 0.0258,
+       "step": 9500
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 1.3333333333333333e-05,
+       "loss": 0.0179,
+       "step": 10000
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 1.3000000000000001e-05,
+       "loss": 0.0184,
+       "step": 10500
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 1.2666666666666667e-05,
+       "loss": 0.0221,
+       "step": 11000
+     },
+     {
+       "epoch": 0.77,
+       "learning_rate": 1.2333333333333334e-05,
+       "loss": 0.0187,
+       "step": 11500
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 1.2e-05,
+       "loss": 0.0174,
+       "step": 12000
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 1.1666666666666668e-05,
+       "loss": 0.0181,
+       "step": 12500
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 1.1333333333333334e-05,
+       "loss": 0.0176,
+       "step": 13000
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 1.1000000000000001e-05,
+       "loss": 0.0171,
+       "step": 13500
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 1.0666666666666667e-05,
+       "loss": 0.0149,
+       "step": 14000
+     },
+     {
+       "epoch": 0.97,
+       "learning_rate": 1.0333333333333335e-05,
+       "loss": 0.0136,
+       "step": 14500
+     },
+     {
+       "epoch": 1.0,
+       "learning_rate": 1e-05,
+       "loss": 0.0178,
+       "step": 15000
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9843833333333334,
+       "eval_loss": 0.07496260851621628,
+       "eval_runtime": 200.0448,
+       "eval_samples_per_second": 299.933,
+       "eval_steps_per_second": 18.746,
+       "step": 15000
+     },
+     {
+       "epoch": 1.03,
+       "learning_rate": 9.666666666666667e-06,
+       "loss": 0.0039,
+       "step": 15500
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 9.333333333333334e-06,
+       "loss": 0.0061,
+       "step": 16000
+     },
+     {
+       "epoch": 1.1,
+       "learning_rate": 9e-06,
+       "loss": 0.0114,
+       "step": 16500
+     },
+     {
+       "epoch": 1.13,
+       "learning_rate": 8.666666666666668e-06,
+       "loss": 0.0039,
+       "step": 17000
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 8.333333333333334e-06,
+       "loss": 0.0075,
+       "step": 17500
+     },
+     {
+       "epoch": 1.2,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 0.0037,
+       "step": 18000
+     },
+     {
+       "epoch": 1.23,
+       "learning_rate": 7.666666666666667e-06,
+       "loss": 0.0105,
+       "step": 18500
+     },
+     {
+       "epoch": 1.27,
+       "learning_rate": 7.333333333333333e-06,
+       "loss": 0.0058,
+       "step": 19000
+     },
+     {
+       "epoch": 1.3,
+       "learning_rate": 7e-06,
+       "loss": 0.0036,
+       "step": 19500
+     },
+     {
+       "epoch": 1.33,
+       "learning_rate": 6.666666666666667e-06,
+       "loss": 0.0079,
+       "step": 20000
+     },
+     {
+       "epoch": 1.37,
+       "learning_rate": 6.333333333333333e-06,
+       "loss": 0.0091,
+       "step": 20500
+     },
+     {
+       "epoch": 1.4,
+       "learning_rate": 6e-06,
+       "loss": 0.0054,
+       "step": 21000
+     },
+     {
+       "epoch": 1.43,
+       "learning_rate": 5.666666666666667e-06,
+       "loss": 0.0013,
+       "step": 21500
+     },
+     {
+       "epoch": 1.47,
+       "learning_rate": 5.333333333333334e-06,
+       "loss": 0.0104,
+       "step": 22000
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 5e-06,
+       "loss": 0.0037,
+       "step": 22500
+     },
+     {
+       "epoch": 1.53,
+       "learning_rate": 4.666666666666667e-06,
+       "loss": 0.0044,
+       "step": 23000
+     },
+     {
+       "epoch": 1.57,
+       "learning_rate": 4.333333333333334e-06,
+       "loss": 0.0047,
+       "step": 23500
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 4.000000000000001e-06,
+       "loss": 0.0035,
+       "step": 24000
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 3.6666666666666666e-06,
+       "loss": 0.0026,
+       "step": 24500
+     },
+     {
+       "epoch": 1.67,
+       "learning_rate": 3.3333333333333333e-06,
+       "loss": 0.0033,
+       "step": 25000
+     },
+     {
+       "epoch": 1.7,
+       "learning_rate": 3e-06,
+       "loss": 0.0027,
+       "step": 25500
+     },
+     {
+       "epoch": 1.73,
+       "learning_rate": 2.666666666666667e-06,
+       "loss": 0.002,
+       "step": 26000
+     },
+     {
+       "epoch": 1.77,
+       "learning_rate": 2.3333333333333336e-06,
+       "loss": 0.001,
+       "step": 26500
+     },
+     {
+       "epoch": 1.8,
+       "learning_rate": 2.0000000000000003e-06,
+       "loss": 0.0014,
+       "step": 27000
+     },
+     {
+       "epoch": 1.83,
+       "learning_rate": 1.6666666666666667e-06,
+       "loss": 0.004,
+       "step": 27500
+     },
+     {
+       "epoch": 1.87,
+       "learning_rate": 1.3333333333333334e-06,
+       "loss": 0.0016,
+       "step": 28000
+     },
+     {
+       "epoch": 1.9,
+       "learning_rate": 1.0000000000000002e-06,
+       "loss": 0.0027,
+       "step": 28500
+     },
+     {
+       "epoch": 1.93,
+       "learning_rate": 6.666666666666667e-07,
+       "loss": 0.0026,
+       "step": 29000
+     },
+     {
+       "epoch": 1.97,
+       "learning_rate": 3.3333333333333335e-07,
+       "loss": 0.0007,
+       "step": 29500
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 0.0,
+       "loss": 0.0016,
+       "step": 30000
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.9938666666666667,
+       "eval_loss": 0.03921842947602272,
+       "eval_runtime": 201.1467,
+       "eval_samples_per_second": 298.29,
+       "eval_steps_per_second": 18.643,
+       "step": 30000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 30000,
+   "num_train_epochs": 2,
+   "save_steps": 500,
+   "total_flos": 4.559380077797894e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
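
Note: the logged learning rates follow a linear decay from 2e-05 to 0 across the full 30000 steps with no warmup (1.9666...e-05 at step 500, 1e-05 at step 15000, 0.0 at step 30000), consistent with the Trainer's default linear schedule. This is read off the log rather than from the opaque training_args.bin, so treat the peak value as inferred:

# Inferred from the log above: peak lr 2e-05, linear decay, max_steps 30000, no warmup.
def lr_at(step, peak=2e-05, max_steps=30000):
    return peak * (1 - step / max_steps)

print(lr_at(500))    # ~1.9666666666666666e-05 -> matches the first log entry
print(lr_at(15000))  # 1e-05                   -> matches the entry at epoch 1.0
print(lr_at(30000))  # 0.0                     -> matches the final entry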
my_awesome_model/checkpoint-30000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6c46d094ebc25b6e4ba42d7d833d3a46a5a4a4755e4e22404dd4492caa24ef3
+ size 4536
my_awesome_model/checkpoint-30000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_model/runs/Oct06_09-35-53_ip-172-31-95-165/events.out.tfevents.1696584953.ip-172-31-95-165.4302.0 ADDED
Binary file (4.14 kB).
 
my_awesome_model/runs/Oct06_10-12-41_ip-172-31-95-165/events.out.tfevents.1696587161.ip-172-31-95-165.5338.0 ADDED
Binary file (14.7 kB).
 
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ transformers
+ requests
+ datasets
+ torch
+ pandas
+ numpy
+ tensorflow
+ tensorflow_hub
+ tensorflow_text
+ scikit-learn
+ evaluate
research/01_embeddings_Similarity.html ADDED
The diff for this file is too large to render. See raw diff
 
research/01_embeddings_Similarity.ipynb ADDED
@@ -0,0 +1,1249 @@
+ {
+   "cells": [
+     {
+       "cell_type": "code",
+       "execution_count": 10,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "import os; os.chdir('..')"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 11,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "from keys import get_similarity_against"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {},
+       "outputs": [],
+       "source": []
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 1,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "main_query= \"water intoxication\"\n"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 1,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "sentence1= '''Water intoxication, also known as water poisoning, hyperhydration, overhydration, or water toxemia, is a potentially fatal disturbance in brain functions that results when the normal balance of electrolytes in the body is pushed outside safe limits by excessive water intake.\n",
+         "\n",
+         "Under normal circumstances, accidentally consuming too much water is exceptionally rare. Nearly all deaths related to water intoxication in normal individuals have resulted either from water-drinking contests, in which individuals attempt to consume large amounts of water, or from long bouts of exercise during which excessive amounts of fluid were consumed.[1] In addition, water cure, a method of torture in which the victim is forced to consume excessive amounts of water, can cause water intoxication.[1]\n",
+         "\n",
+         "Water, like any other substance, can be considered a poison when over-consumed in a brief period of time. Water intoxication mostly occurs when water is being consumed in a high quantity without adequate electrolyte intake.[2]\n",
+         "\n",
+         "Excess of body water may also be a result of a medical condition or improper treatment; see \"hyponatremia\" for some examples. Water is considered one of the least toxic chemical compounds, with an LD50 exceeding 90 ml/kg in rats;[3] drinking six liters in three hours has caused the death of a human.[4]'''\n",
+         "\n",
+         "\n",
+         "sentence2= '''Hyponatremia, colloquially termed aqua inebriation, or aqueous toxemia, represents a perilous derangement of cerebral functions. It ensues when the delicate equilibrium of bodily electrolytes is jolted beyond secure thresholds by an extravagant indulgence in aqueous libations.\n",
+         "\n",
+         "In ordinary circumstances, inadvertent indulgence in excessive aqueous elixir is exceedingly exceptional. Virtually all instances of aqueous inebriation-related demises in average individuals have stemmed either from aquatic imbiber duels, wherein contenders vie to imbibe copious volumes of water, or from prolonged stints of exertion accompanied by the immoderate ingestion of fluids. Additionally, aqua torment, a tormenting methodology in which the sufferer is coerced into partaking of profuse quantities of water, can precipitate aqueous inebriation.\n",
+         "\n",
+         "H2O, akin to any other substance, may be deemed venomous when extravagantly consumed within a concise temporal interval. Aqua intoxication predominantly manifests itself when an exorbitant quantum of water is ingested without commensurate electrolytic supplementation.\n",
+         "\n",
+         "A superabundance of corporeal aqueous content might also emerge as an outcome of a medical ailment or inept therapy; for illustrative instances, refer to \"hyponatremia.\" Water is reckoned as one of the least virulent chemical compounds, boasting an LD50 that surpasses 90 ml/kg in rodents. Consuming six liters in a mere three hours has led to the demise of a human.'''"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 64,
+       "metadata": {},
+       "outputs": [],
+       "source": []
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 14,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "'Water intoxication, also known as water poisoning, hyperhydration, overhydration, or water toxemia, is a potentially fatal disturbance in brain functions that results when the normal balance of electrolytes in the body is pushed outside safe limits by excessive water intake.\\n\\nUnder normal circumstances, accidentally consuming too much water is exceptionally rare. Nearly all deaths related to water intoxication in normal individuals have resulted either from water-drinking contests, in which individuals attempt to consume large amounts of water, or from long bouts of exercise during which excessive amounts of fluid were consumed.[1] In addition, water cure, a method of torture in which the victim is forced to consume excessive amounts of water, can cause water intoxication.[1]\\n\\nWater, like any other substance, can be considered a poison when over-consumed in a brief period of time. Water intoxication mostly occurs when water is being consumed in a high quantity without adequate electrolyte intake.[2]\\n\\nExcess of body water may also be a result of a medical condition or improper treatment; see \"hyponatremia\" for some examples. Water is considered one of the least toxic chemical compounds, with an LD50 exceeding 90 ml/kg in rats;[3] drinking six liters in three hours has caused the death of a human.[4]'"
+             ]
+           },
+           "execution_count": 14,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "sentence1"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 15,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "'Hyponatremia, colloquially termed aqua inebriation, or aqueous toxemia, represents a perilous derangement of cerebral functions. It ensues when the delicate equilibrium of bodily electrolytes is jolted beyond secure thresholds by an extravagant indulgence in aqueous libations.\\n\\nIn ordinary circumstances, inadvertent indulgence in excessive aqueous elixir is exceedingly exceptional. Virtually all instances of aqueous inebriation-related demises in average individuals have stemmed either from aquatic imbiber duels, wherein contenders vie to imbibe copious volumes of water, or from prolonged stints of exertion accompanied by the immoderate ingestion of fluids. Additionally, aqua torment, a tormenting methodology in which the sufferer is coerced into partaking of profuse quantities of water, can precipitate aqueous inebriation.\\n\\nH2O, akin to any other substance, may be deemed venomous when extravagantly consumed within a concise temporal interval. Aqua intoxication predominantly manifests itself when an exorbitant quantum of water is ingested without commensurate electrolytic supplementation.\\n\\nA superabundance of corporeal aqueous content might also emerge as an outcome of a medical ailment or inept therapy; for illustrative instances, refer to \"hyponatremia.\" Water is reckoned as one of the least virulent chemical compounds, boasting an LD50 that surpasses 90 ml/kg in rodents. Consuming six liters in a mere three hours has led to the demise of a human.'"
+             ]
+           },
+           "execution_count": 15,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "sentence2"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {},
+       "source": [
+         "# Method 1: `Direct Similarity of Embeddings`"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 16,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "import requests\n",
+         "import json"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 17,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "response= requests.post(url= \"https://embeddings.paperbot.ai/get-similarity-against\",\n",
+         "                        json={\n",
+         "                            \"main_entity\": main_query, \n",
+         "                            \"compare_with\": [sentence1, sentence2]\n",
+         "                        })"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 18,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "[0.94, 0.9]"
+             ]
+           },
+           "execution_count": 18,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "json.loads(response.content.decode(\"utf-8\"))['similarity']\n"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {},
+       "source": [
+         "# Method 1: `BERT Question-Answering`\n"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 4,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline\n",
+         "\n",
+         "model_name = \"deepset/roberta-base-squad2\""
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 3,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stderr",
+           "output_type": "stream",
+           "text": [
+             "/home/ubuntu/SentenceStructureComparision/venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+             "  from .autonotebook import tqdm as notebook_tqdm\n"
+           ]
+         }
+       ],
+       "source": [
+         "\n",
+         "\n",
+         "# a) Get predictions\n",
+         "nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)\n",
+         "\n",
+         "\n"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 4,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "{'score': 0.001219886471517384,\n",
+               " 'start': 252,\n",
+               " 'end': 274,\n",
+               " 'answer': 'excessive water intake'}"
+             ]
+           },
+           "execution_count": 4,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "QA_input = {\n",
+         "    'question': main_query,\n",
+         "    'context': sentence1\n",
+         "}\n",
+         "res = nlp(QA_input)\n",
+         "\n",
+         "res"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 5,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "{'score': 2.8929189284099266e-05,\n",
+               " 'start': 958,\n",
+               " 'end': 975,\n",
+               " 'answer': 'Aqua intoxication'}"
+             ]
+           },
+           "execution_count": 5,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "QA_input = {\n",
+         "    'question': main_query,\n",
+         "    'context': sentence2\n",
+         "}\n",
+         "res = nlp(QA_input)\n",
+         "\n",
+         "res"
+       ]
+     },
+     {
+       "cell_type": "markdown",
+       "metadata": {},
+       "source": [
+         "## Perplexity"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 5,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stderr",
+           "output_type": "stream",
+           "text": [
+             "Some weights of RobertaForMaskedLM were not initialized from the model checkpoint at deepset/roberta-base-squad2 and are newly initialized: ['lm_head.dense.weight', 'lm_head.layer_norm.bias', 'lm_head.dense.bias', 'lm_head.layer_norm.weight', 'lm_head.decoder.bias', 'lm_head.bias']\n",
+             "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
+           ]
+         }
+       ],
+       "source": [
+         "import torch\n",
+         "from transformers import AutoTokenizer, AutoModelForMaskedLM\n",
+         "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
+         "model = AutoModelForMaskedLM.from_pretrained(model_name).to(\"cuda\")\n",
+         "\n",
+         "\n"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 6,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "RobertaConfig {\n",
+               "  \"_name_or_path\": \"deepset/roberta-base-squad2\",\n",
+               "  \"architectures\": [\n",
+               "    \"RobertaForQuestionAnswering\"\n",
+               "  ],\n",
+               "  \"attention_probs_dropout_prob\": 0.1,\n",
+               "  \"bos_token_id\": 0,\n",
+               "  \"classifier_dropout\": null,\n",
+               "  \"eos_token_id\": 2,\n",
+               "  \"gradient_checkpointing\": false,\n",
+               "  \"hidden_act\": \"gelu\",\n",
+               "  \"hidden_dropout_prob\": 0.1,\n",
+               "  \"hidden_size\": 768,\n",
+               "  \"initializer_range\": 0.02,\n",
+               "  \"intermediate_size\": 3072,\n",
+               "  \"language\": \"english\",\n",
+               "  \"layer_norm_eps\": 1e-05,\n",
+               "  \"max_position_embeddings\": 514,\n",
+               "  \"model_type\": \"roberta\",\n",
+               "  \"name\": \"Roberta\",\n",
+               "  \"num_attention_heads\": 12,\n",
+               "  \"num_hidden_layers\": 12,\n",
+               "  \"pad_token_id\": 1,\n",
+               "  \"position_embedding_type\": \"absolute\",\n",
+               "  \"transformers_version\": \"4.34.0\",\n",
+               "  \"type_vocab_size\": 1,\n",
+               "  \"use_cache\": true,\n",
+               "  \"vocab_size\": 50265\n",
+               "}"
+             ]
+           },
+           "execution_count": 6,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "model.config"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 9,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "def calculate_perplexity(sentence):\n",
+         "    inputs = tokenizer(sentence, return_tensors='pt').to(\"cuda\")\n",
+         "    with torch.no_grad():\n",
+         "        outputs = model(**inputs, labels=inputs['input_ids'])\n",
+         "        print(outputs)\n",
+         "        loss = outputs.loss # cross entropy loss ----> assuming our model gives us probabilities of different words\n",
+         "    perplexity = torch.exp(loss)\n",
+         "    return perplexity.item()"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 10,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stdout",
+           "output_type": "stream",
+           "text": [
+             "MaskedLMOutput(loss=tensor(17.8039, device='cuda:0'), logits=tensor([[[ 0.3795, 0.3104, -0.5476, ..., 0.6206, 1.2048, 0.2867],\n",
+             " [ 6.1099, 0.2996, -3.4115, ..., 0.1186, -2.1274, -0.5033],\n",
+             " [ 3.1611, 0.4855, -1.8712, ..., 1.0915, -1.1092, 0.9038],\n",
+             " ...,\n",
+             " [ 4.7616, 0.5287, -4.6328, ..., 0.6480, -2.6257, 0.2898],\n",
+             " [-1.5800, 0.9459, -4.8465, ..., -1.5705, -3.4567, -1.7680],\n",
+             " [-2.2582, 0.7316, -5.1463, ..., -2.0331, -4.5188, -0.8688]]],\n",
+             " device='cuda:0'), hidden_states=None, attentions=None)\n",
+             "Perplexity of the sentence1: 53969640.0\n",
+             "MaskedLMOutput(loss=tensor(17.1493, device='cuda:0'), logits=tensor([[[ 0.2888, 0.1490, -0.1353, ..., 0.9108, 1.6271, 0.4736],\n",
+             " [ 7.6726, 0.6179, -2.9211, ..., 1.1359, -1.1669, 0.7527],\n",
+             " [ 3.2592, 0.6674, -4.1829, ..., -1.3289, -2.9414, 0.0205],\n",
+             " ...,\n",
+             " [ 1.4004, 0.5725, -3.3266, ..., -0.8717, -3.7206, -0.4705],\n",
+             " [-1.4701, 0.8628, -5.1591, ..., -1.6357, -3.4620, -1.6146],\n",
+             " [-2.7571, 0.6047, -5.1016, ..., -1.9740, -4.2890, -1.0212]]],\n",
+             " device='cuda:0'), hidden_states=None, attentions=None)\n",
+             "Perplexity of the sentence2: 28044616.0\n"
+           ]
+         }
+       ],
+       "source": [
+         "print(f'Perplexity of the sentence1: {calculate_perplexity(sentence1)}')\n",
+         "print(f'Perplexity of the sentence2: {calculate_perplexity(sentence2)}')"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 11,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "{'input_ids': tensor([[ 0, 25589, 34205, 6, 67, 684, 25, 514, 15000, 6,\n",
+               " 8944, 30420, 8475, 6, 81, 30420, 8475, 6, 50, 514,\n",
+               " 46334, 23249, 6, 16, 10, 2905, 6484, 20771, 11, 2900,\n",
+               " 8047, 14, 775, 77, 5, 2340, 2394, 9, 39875, 12782,\n",
+               " 11, 5, 809, 16, 3148, 751, 1522, 4971, 30, 10079,\n",
+               " 514, 14797, 4, 50118, 50118, 17245, 2340, 4215, 6, 13636,\n",
+               " 16997, 350, 203, 514, 16, 20135, 3159, 4, 9221, 70,\n",
+               " 3257, 1330, 7, 514, 34205, 11, 2340, 2172, 33, 4596,\n",
+               " 1169, 31, 514, 12, 10232, 18957, 11997, 6, 11, 61,\n",
+               " 2172, 2120, 7, 14623, 739, 5353, 9, 514, 6, 50,\n",
+               " 31, 251, 24750, 9, 3325, 148, 61, 10079, 5353, 9,\n",
+               " 12293, 58, 13056, 31274, 134, 742, 96, 1285, 6, 514,\n",
+               " 13306, 6, 10, 5448, 9, 11809, 11, 61, 5, 1802,\n",
+               " 16, 1654, 7, 14623, 10079, 5353, 9, 514, 6, 64,\n",
+               " 1303, 514, 34205, 31274, 134, 742, 50118, 50118, 25589, 6,\n",
+               " 101, 143, 97, 6572, 6, 64, 28, 1687, 10, 17712,\n",
+               " 77, 81, 12, 10998, 28817, 11, 10, 4315, 675, 9,\n",
+               " 86, 4, 3201, 34205, 2260, 11493, 77, 514, 16, 145,\n",
+               " 13056, 11, 10, 239, 16363, 396, 9077, 39875, 859, 14797,\n",
+               " 31274, 176, 742, 50118, 50118, 9089, 19348, 9, 809, 514,\n",
+               " 189, 67, 28, 10, 898, 9, 10, 1131, 1881, 50,\n",
+               " 18418, 1416, 131, 192, 22, 33027, 261, 415, 5593, 493,\n",
+               " 113, 13, 103, 7721, 4, 3201, 16, 1687, 65, 9,\n",
+               " 5, 513, 8422, 4747, 18291, 6, 19, 41, 34744, 1096,\n",
+               " 17976, 1814, 36769, 73, 9043, 11, 24162, 131, 10975, 246,\n",
+               " 742, 4835, 411, 6474, 268, 11, 130, 722, 34, 1726,\n",
+               " 5, 744, 9, 10, 1050, 31274, 306, 742, 2]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
+               " 1, 1, 1, 1, 1]])}"
+             ]
+           },
+           "execution_count": 11,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "encodings_sentence1= tokenizer(sentence1, return_tensors=\"pt\")\n",
+         "encodings_sentence1"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 24,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stdout",
+           "output_type": "stream",
+           "text": [
+             "seq_len = 269\n"
+           ]
+         },
+         {
+           "name": "stderr",
+           "output_type": "stream",
+           "text": [
+             "  0%|          | 0/269 [00:00<?, ?it/s]\n"
+           ]
+         }
+       ],
+       "source": [
+         "import torch\n",
+         "from tqdm import tqdm\n",
+         "\n",
+         "# max_length = model.config.n_positions\n",
+         "max_length= model.config.max_position_embeddings\n",
+         "stride = 1\n",
+         "seq_len = encodings_sentence1.input_ids.size(1)\n",
+         "\n",
+         "print(f\"seq_len = {seq_len}\")\n",
+         "\n",
+         "nlls = []\n",
+         "prev_end_loc = 0\n",
+         "for begin_loc in tqdm(range(0, seq_len, stride)):\n",
+         "    end_loc = min(begin_loc + max_length, seq_len)\n",
+         "    trg_len = end_loc - prev_end_loc  # may be different from stride on last loop\n",
+         "    input_ids = encodings_sentence1.input_ids[:, begin_loc:end_loc].to(\"cuda\")\n",
+         "    target_ids = input_ids.clone()\n",
+         "    target_ids[:, :-trg_len] = -100\n",
+         "\n",
+         "    with torch.no_grad():\n",
+         "        outputs = model(input_ids, labels=target_ids)\n",
+         "\n",
+         "        # loss is calculated using CrossEntropyLoss which averages over valid labels\n",
+         "        # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n",
+         "        # to the left by 1.\n",
+         "        neg_log_likelihood = outputs.loss\n",
+         "\n",
+         "    nlls.append(neg_log_likelihood)\n",
+         "\n",
+         "    prev_end_loc = end_loc\n",
+         "    if end_loc == seq_len:\n",
+         "        break\n",
+         "\n",
+         "ppl = torch.exp(torch.stack(nlls).mean())"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 25,
+       "metadata": {},
+       "outputs": [
+         {
+           "data": {
+             "text/plain": [
+               "tensor(53969640., device='cuda:0')"
+             ]
+           },
+           "execution_count": 25,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "ppl"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 26,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stdout",
+           "output_type": "stream",
+           "text": [
+             "seq_len = 324\n"
+           ]
+         },
+         {
+           "name": "stderr",
+           "output_type": "stream",
+           "text": [
+             "  0%|          | 0/324 [00:00<?, ?it/s]\n"
+           ]
+         },
+         {
+           "data": {
+             "text/plain": [
+               "tensor(28044616., device='cuda:0')"
+             ]
+           },
+           "execution_count": 26,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "encodings_sentence2= tokenizer(sentence2, return_tensors=\"pt\")\n",
+         "encodings_sentence2\n",
+         "import torch\n",
+         "from tqdm import tqdm\n",
+         "\n",
+         "# max_length = model.config.n_positions\n",
+         "max_length= model.config.max_position_embeddings\n",
+         "stride = 1\n",
+         "seq_len = encodings_sentence2.input_ids.size(1)\n",
+         "\n",
+         "print(f\"seq_len = {seq_len}\")\n",
+         "\n",
+         "nlls = []\n",
+         "prev_end_loc = 0\n",
+         "for begin_loc in tqdm(range(0, seq_len, stride)):\n",
+         "    end_loc = min(begin_loc + max_length, seq_len)\n",
+         "    trg_len = end_loc - prev_end_loc  # may be different from stride on last loop\n",
+         "    input_ids = encodings_sentence2.input_ids[:, begin_loc:end_loc].to(\"cuda\")\n",
+         "    target_ids = input_ids.clone()\n",
+         "    target_ids[:, :-trg_len] = -100\n",
+         "\n",
+         "    with torch.no_grad():\n",
+         "        outputs = model(input_ids, labels=target_ids)\n",
+         "\n",
+         "        # loss is calculated using CrossEntropyLoss which averages over valid labels\n",
+         "        # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n",
+         "        # to the left by 1.\n",
+         "        neg_log_likelihood = outputs.loss\n",
+         "\n",
+         "    nlls.append(neg_log_likelihood)\n",
+         "\n",
+         "    prev_end_loc = end_loc\n",
+         "    if end_loc == seq_len:\n",
+         "        break\n",
+         "\n",
+         "ppl = torch.exp(torch.stack(nlls).mean())\n",
+         "ppl"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": 27,
+       "metadata": {},
+       "outputs": [
+         {
+           "name": "stdout",
+           "output_type": "stream",
+           "text": [
+             "seq_len = 4\n"
+           ]
+         },
+         {
+           "name": "stderr",
+           "output_type": "stream",
+           "text": [
+             "  0%|          | 0/4 [00:00<?, ?it/s]\n"
+           ]
+         },
+         {
+           "data": {
+             "text/plain": [
+               "tensor(67252448., device='cuda:0')"
+             ]
+           },
+           "execution_count": 27,
+           "metadata": {},
+           "output_type": "execute_result"
+         }
+       ],
+       "source": [
+         "encodings_sentence2= tokenizer(\"Good morning\", return_tensors=\"pt\")\n",
+         "encodings_sentence2\n",
+         "import torch\n",
+         "from tqdm import tqdm\n",
+         "\n",
+         "# max_length = model.config.n_positions\n",
+         "max_length= model.config.max_position_embeddings\n",
+         "stride = 1\n",
+         "seq_len = encodings_sentence2.input_ids.size(1)\n",
+         "\n",
+         "print(f\"seq_len = {seq_len}\")\n",
+         "\n",
+         "nlls = []\n",
+         "prev_end_loc = 0\n",
+         "for begin_loc in tqdm(range(0, seq_len, stride)):\n",
+         "    end_loc = min(begin_loc + max_length, seq_len)\n",
+         "    trg_len = end_loc - prev_end_loc  # may be different from stride on last loop\n",
+         "    input_ids = encodings_sentence2.input_ids[:, begin_loc:end_loc].to(\"cuda\")\n",
+         "    target_ids = input_ids.clone()\n",
+         "    target_ids[:, :-trg_len] = -100\n",
+         "\n",
+         "    with torch.no_grad():\n",
+         "        outputs = model(input_ids, labels=target_ids)\n",
+         "\n",
+         "        # loss is calculated using CrossEntropyLoss which averages over valid labels\n",
+         "        # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n",
648
+ " # to the left by 1.\n",
649
+ " neg_log_likelihood = outputs.loss\n",
650
+ "\n",
651
+ " nlls.append(neg_log_likelihood)\n",
652
+ "\n",
653
+ " prev_end_loc = end_loc\n",
654
+ " if end_loc == seq_len:\n",
655
+ " break\n",
656
+ "\n",
657
+ "ppl = torch.exp(torch.stack(nlls).mean())\n",
658
+ "ppl"
659
+ ]
660
+ },
661
+ {
662
+ "cell_type": "code",
663
+ "execution_count": 2,
664
+ "metadata": {},
665
+ "outputs": [
666
+ {
667
+ "name": "stderr",
668
+ "output_type": "stream",
669
+ "text": [
670
+ "/home/ubuntu/SentenceStructureComparision/venv/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
671
+ " from .autonotebook import tqdm as notebook_tqdm\n",
672
+ "Downloading (…)lve/main/config.json: 100%|██████████| 666/666 [00:00<00:00, 4.11MB/s]\n",
673
+ "Downloading model.safetensors: 100%|██████████| 3.25G/3.25G [00:36<00:00, 89.3MB/s]\n",
674
+ "Downloading (…)neration_config.json: 100%|██████████| 124/124 [00:00<00:00, 75.5kB/s]\n",
675
+ "Downloading (…)olve/main/vocab.json: 100%|██████████| 1.04M/1.04M [00:00<00:00, 4.88MB/s]\n",
676
+ "Downloading (…)olve/main/merges.txt: 100%|██████████| 456k/456k [00:00<00:00, 51.0MB/s]\n",
677
+ "Downloading (…)/main/tokenizer.json: 100%|██████████| 1.36M/1.36M [00:00<00:00, 70.6MB/s]\n"
678
+ ]
679
+ }
680
+ ],
681
+ "source": [
682
+ "from transformers import GPT2LMHeadModel, GPT2TokenizerFast\n",
683
+ "\n",
684
+ "device = \"cuda\"\n",
685
+ "model_id = \"gpt2-large\"\n",
686
+ "model = GPT2LMHeadModel.from_pretrained(model_id).to(device)\n",
687
+ "tokenizer = GPT2TokenizerFast.from_pretrained(model_id)\n",
688
+ "encodings = tokenizer(sentence1, return_tensors=\"pt\")"
689
+ ]
690
+ },
691
+ {
692
+ "cell_type": "code",
693
+ "execution_count": 3,
694
+ "metadata": {},
695
+ "outputs": [
696
+ {
697
+ "name": "stderr",
698
+ "output_type": "stream",
699
+ "text": [
700
+ " 0%| | 0/1 [00:00<?, ?it/s]\n"
701
+ ]
702
+ }
703
+ ],
704
+ "source": [
705
+ "import torch\n",
706
+ "from tqdm import tqdm\n",
707
+ "\n",
708
+ "max_length = model.config.n_positions\n",
709
+ "stride = 512\n",
710
+ "seq_len = encodings.input_ids.size(1)\n",
711
+ "\n",
712
+ "nlls = []\n",
713
+ "prev_end_loc = 0\n",
714
+ "for begin_loc in tqdm(range(0, seq_len, stride)):\n",
715
+ " end_loc = min(begin_loc + max_length, seq_len)\n",
716
+ " trg_len = end_loc - prev_end_loc # may be different from stride on last loop\n",
717
+ " input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)\n",
718
+ " target_ids = input_ids.clone()\n",
719
+ " target_ids[:, :-trg_len] = -100\n",
720
+ "\n",
721
+ " with torch.no_grad():\n",
722
+ " outputs = model(input_ids, labels=target_ids)\n",
723
+ "\n",
724
+ " # loss is calculated using CrossEntropyLoss which averages over valid labels\n",
725
+ " # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n",
726
+ " # to the left by 1.\n",
727
+ " neg_log_likelihood = outputs.loss\n",
728
+ "\n",
729
+ " nlls.append(neg_log_likelihood)\n",
730
+ "\n",
731
+ " prev_end_loc = end_loc\n",
732
+ " if end_loc == seq_len:\n",
733
+ " break\n",
734
+ "\n",
735
+ "ppl = torch.exp(torch.stack(nlls).mean())"
736
+ ]
737
+ },
738
+ {
739
+ "cell_type": "code",
740
+ "execution_count": 4,
741
+ "metadata": {},
742
+ "outputs": [
743
+ {
744
+ "data": {
745
+ "text/plain": [
746
+ "tensor(12.3761, device='cuda:0')"
747
+ ]
748
+ },
749
+ "execution_count": 4,
750
+ "metadata": {},
751
+ "output_type": "execute_result"
752
+ }
753
+ ],
754
+ "source": [
755
+ "ppl"
756
+ ]
757
+ },
758
+ {
759
+ "cell_type": "code",
760
+ "execution_count": 6,
761
+ "metadata": {},
762
+ "outputs": [
763
+ {
764
+ "name": "stdout",
765
+ "output_type": "stream",
766
+ "text": [
767
+ "seq_len = 322\n"
768
+ ]
769
+ },
770
+ {
771
+ "name": "stderr",
772
+ "output_type": "stream",
773
+ "text": [
774
+ " 0%| | 0/1 [00:00<?, ?it/s]\n"
775
+ ]
776
+ },
777
+ {
778
+ "data": {
779
+ "text/plain": [
780
+ "tensor(30.3624, device='cuda:0')"
781
+ ]
782
+ },
783
+ "execution_count": 6,
784
+ "metadata": {},
785
+ "output_type": "execute_result"
786
+ }
787
+ ],
788
+ "source": [
789
+ "encodings_sentence2= tokenizer(sentence2, return_tensors=\"pt\")\n",
790
+ "encodings_sentence2\n",
791
+ "import torch\n",
792
+ "from tqdm import tqdm\n",
793
+ "\n",
794
+ "# max_length = model.config.n_positions\n",
795
+ "max_length= model.config.max_position_embeddings\n",
796
+ "stride = 512\n",
797
+ "seq_len = encodings_sentence2.input_ids.size(1)\n",
798
+ "\n",
799
+ "print(f\"seq_len = {seq_len}\")\n",
800
+ "\n",
801
+ "nlls = []\n",
802
+ "prev_end_loc = 0\n",
803
+ "for begin_loc in tqdm(range(0, seq_len, stride)):\n",
804
+ " end_loc = min(begin_loc + max_length, seq_len)\n",
805
+ " trg_len = end_loc - prev_end_loc # may be different from stride on last loop\n",
806
+ " input_ids = encodings_sentence2.input_ids[:, begin_loc:end_loc].to(\"cuda\")\n",
807
+ " target_ids = input_ids.clone()\n",
808
+ " target_ids[:, :-trg_len] = -100\n",
809
+ "\n",
810
+ " with torch.no_grad():\n",
811
+ " outputs = model(input_ids, labels=target_ids)\n",
812
+ "\n",
813
+ " # loss is calculated using CrossEntropyLoss which averages over valid labels\n",
814
+ " # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels\n",
815
+ " # to the left by 1.\n",
816
+ " neg_log_likelihood = outputs.loss\n",
817
+ "\n",
818
+ " nlls.append(neg_log_likelihood)\n",
819
+ "\n",
820
+ " prev_end_loc = end_loc\n",
821
+ " if end_loc == seq_len:\n",
822
+ " break\n",
823
+ "\n",
824
+ "ppl = torch.exp(torch.stack(nlls).mean())\n",
825
+ "ppl"
826
+ ]
827
+ },
828
+ {
829
+ "cell_type": "code",
830
+ "execution_count": 55,
831
+ "metadata": {},
832
+ "outputs": [],
833
+ "source": [
834
+ "s1= '''Imagine you’re trying to build a chatbot that helps home cooks autocomplete their grocery shopping lists based on popular flavor combinations from social media. Your goal is to let users type in what they have in their fridge, like “chicken, carrots,” then list the five or six ingredients that go best with those flavors. You’ve already scraped thousands of recipe sites for ingredient lists, and now you just need to choose the best NLP model to predict which words appear together most often. Easy, right?\n",
835
+ "\n",
836
+ "Well, not exactly. The gold standard for checking the performance of a model is extrinsic evaluation: measuring its final performance on a real-world task. In this case, that might mean letting your model generate a dataset of a thousand new recipes, then asking a few hundred data labelers to rate how tasty they sound.\n",
837
+ "\n",
838
+ "Unfortunately, you don’t have one dataset, you have one dataset for every variation of every parameter of every model you want to test. Even simple comparisons of the same basic model can lead to a combinatorial explosion: 3 different optimization functions with 5 different learning rates and 4 different batch sizes equals 120 different datasets, all with hundreds of thousands of individual data points. How can you quickly narrow down which models are the most promising to fully evaluate?\n",
839
+ "\n",
840
+ "Enter intrinsic evaluation: finding some property of a model that estimates the model’s quality independent of the specific tasks its used to perform. Specifically, enter perplexity, a metric that quantifies how uncertain a model is about the predictions it makes. Low perplexity only guarantees a model is confident, not accurate, but it often correlates well with the model’s final real-world performance, and it can be quickly calculated using just the probability distribution the model learns from the training dataset.\n",
841
+ "\n",
842
+ "In this week’s post, we’ll look at how perplexity is calculated, what it means intuitively for a model’s performance, and the pitfalls of using perplexity for comparisons across different datasets and models.\n",
843
+ "\n",
844
+ "Calculating perplexity\n",
845
+ "To understand how perplexity is calculated, let’s start with a very simple version of the recipe training dataset that only has four short ingredient lists:\n",
846
+ "\n",
847
+ "chicken, butter, pears\n",
848
+ "chicken, butter, chili\n",
849
+ "lemon, pears, shrimp\n",
850
+ "chili, shrimp, lemon\n",
851
+ "In machine learning terms, these sentences are a language with a vocabulary size of 6 (because there are a total of 6 unique words). A language model is just a function trained on a specific language that predicts the probability of a certain word appearing given the words that appeared around it.\n",
852
+ "\n",
853
+ "One of the simplest language models is a unigram model, which looks at words one at a time assuming they’re statistically independent. In other words, it returns the relative frequency that each word appears in the training data. Here’s a unigram model for the dataset above, which is especially simple because every word appears the same number of times:\n",
854
+ "\n",
855
+ "\n",
856
+ "It’s pretty obvious this isn’t a very good model. No matter which ingredients you say you have, it will just pick any new ingredient at random with equal probability, so you might as well be rolling a fair die to choose. Let’s quantify exactly how bad this is.\n",
857
+ "\n",
858
+ "We’re going to start by calculating how surprised our model is when it sees a single specific word like “chicken.” Intuitively, the more probable an event is, the less surprising it is. If you’re certain something is impossible — if its probability is 0 — then you would be infinitely surprised if it happened. Similarly, if something was guaranteed to happen with probability 1, your surprise when it happened would be 0.'''\n",
859
+ "\n",
860
+ "\n",
861
+ "\n",
862
+ "\n",
863
+ "# generated by gpt\n",
864
+ "\n",
865
+ "s2= '''Imagine you want to create a smart assistant for people who cook at home. This assistant should help them make shopping lists based on popular food combinations they find on social media. Your aim is to allow users to type in what ingredients they already have, like \"chicken, carrots,\" and then get suggestions for the five or six best ingredients that go well with those. To do this, you've collected lots of lists of ingredients from recipe websites. Now, your main task is to pick the best computer program that can predict which ingredients often appear together. Sounds easy, right?\n",
866
+ "\n",
867
+ "But it's not that simple. The usual way to check how well a computer program works is to see how well it does on a real task. In this case, that means having your program create a thousand new recipes and then asking a few hundred people to rate how good those recipes sound.\n",
868
+ "\n",
869
+ "Here's the problem: You don't have just one set of recipes to test your program on; you have many sets, each with different settings. Even when comparing different versions of the same program, there can be a huge number of combinations to test. For example, if you have three different ways to make the program work, five different speeds for the program to learn, and four different sizes of groups of data, you end up with 120 different sets of recipes to evaluate. Each set contains hundreds of thousands of individual data points. So, how do you quickly figure out which versions of the program are the most promising to test further?\n",
870
+ "\n",
871
+ "This is where \"intrinsic evaluation\" comes in. It means finding some quality of the program that tells you how good it is without having to do the real cooking task. In this case, it's about \"perplexity,\" which is a way to measure how uncertain the program is when it makes predictions. A low perplexity score means the program is pretty sure about its predictions, though it doesn't guarantee those predictions are correct. But it often matches up with how well the program does in real cooking tasks, and it's easy to calculate using the information the program learns from the training data.\n",
872
+ "\n",
873
+ "In this post, we'll explore how to calculate perplexity, what it indicates about the program's performance, and the problems you might face when using perplexity to compare different sets of data and programs.\n",
874
+ "\n",
875
+ "Calculating Perplexity:\n",
876
+ "To understand how perplexity works, let's start with a simple example. Imagine you have a small dataset of recipes with just four short lists of ingredients:\n",
877
+ "\n",
878
+ "1. Chicken, butter, pears\n",
879
+ "2. Chicken, butter, chili\n",
880
+ "3. Lemon, pears, shrimp\n",
881
+ "4. Chili, shrimp, lemon\n",
882
+ "\n",
883
+ "In machine learning terms, these sentences form a language with only six different words. A language model is like a computer program that has been trained on this language. It predicts the likelihood of a word appearing based on the words that came before it.\n",
884
+ "\n",
885
+ "One of the simplest language models is called a \"unigram model.\" It assumes that words are independent of each other and predicts each word's frequency based on how often it appears in the training data. Here's what a unigram model for this dataset would look like, and it's pretty basic because it assigns the same probability to every word:\n",
886
+ "\n",
887
+ "This model isn't very good. No matter what ingredients you mention, it will randomly pick a new ingredient with equal likelihood. It's like rolling a fair die to choose an ingredient. Let's measure exactly how bad it is.\n",
888
+ "\n",
889
+ "First, we'll calculate how surprised the model is when it sees a specific word like \"chicken.\" The more probable an event is, the less surprising it is. If something has zero chance of happening (probability of 0), you'd be incredibly surprised if it did. On the other hand, if something is guaranteed to happen (probability of 1), you wouldn't be surprised at all when it occurs.'''"
890
+ ]
891
+ },
892
+ {
893
+ "cell_type": "code",
894
+ "execution_count": 48,
895
+ "metadata": {},
896
+ "outputs": [],
897
+ "source": [
898
+ "s1= '''Basketball is a team sport played by two teams of five players each. The primary objective is to score points by shooting the basketball through the opponent's hoop, which is mounted on a backboard 10 feet (3.048 meters) above the ground. The team with the most points at the end of the game wins. Basketball is played on a rectangular court, typically indoors, with a surface made of wood or synthetic materials. The rules and regulations are governed by various organizations, such as FIBA (International Basketball Federation) and the NBA (National Basketball Association). The following is a general outline of the basic rules of basketball:\n",
899
+ "\n",
900
+ "1. Game duration: A regulation basketball game is divided into four quarters, each lasting 12 minutes in the NBA and 10 minutes in FIBA play. College basketball in the US has two 20-minute halves. If the game is tied at the end of regulation, overtime periods are played until a winner is determined.\n",
901
+ "\n",
902
+ "2. Starting play: The game begins with a jump ball at the center of the court, where the referee throws the ball into the air, and one player from each team tries to gain possession by tapping it to a teammate.\n",
903
+ "\n",
904
+ "3. Scoring: Points are scored by shooting the ball through the hoop. A field goal made from inside the three-point arc is worth two points, while a field goal made from outside the arc is worth three points. Free throws, awarded after a foul, are worth one point each.\n",
905
+ "\n",
906
+ "4. Possession and dribbling: A player in possession of the ball must either pass it to a teammate or dribble (bounce) the ball while moving.'''"
907
+ ]
908
+ },
909
+ {
910
+ "cell_type": "code",
911
+ "execution_count": 58,
912
+ "metadata": {},
913
+ "outputs": [],
914
+ "source": [
915
+ "s2= '''Political stability is the ability of a government to maintain order and authority within its borders. It is essential for economic growth, as it provides a foundation for investment and trade.\n",
916
+ "There are many factors that contribute to political stability, including:\n",
917
+ "A strong rule of law: The rule of law is the principle that everyone is subject to the same laws, regardless of their social status or political affiliation. A strong rule of law helps to prevent corruption and ensures that everyone has equal opportunity to succeed.\n",
918
+ "A well-functioning government: A well-functioning government is one that is able to provide essential services, such as security, education, and healthcare. It is also able to manage the economy effectively and to respond to crises.\n",
919
+ "A vibrant civil society: A vibrant civil society is one that is made up of active and engaged citizens. Civil society organizations can help to hold the government accountable and to promote democracy and good governance.\n",
920
+ "Political stability is not always easy to achieve, but it is essential for economic growth. By investing in political stability, we can create a foundation for long-term prosperity.\n",
921
+ "Here are some of the benefits of political stability:\n",
922
+ "Increased investment: Investors are more likely to invest in countries that are politically stable. This can lead to increased economic growth and job creation.\n",
923
+ "Improved trade: Trade between countries is easier and more efficient when there is political stability. This can lead to lower prices for consumers and increased profits for businesses.\n",
924
+ "Reduced poverty: Political stability can help to reduce poverty by creating a more conducive environment for economic growth.\n",
925
+ "Improved quality of life: Political stability can lead to improved quality of life by providing a safer and more secure environment.\n",
926
+ "Political stability is a key ingredient for a prosperous and successful society. By investing in political stability, we can create a better future for ourselves and our children.\n",
927
+ "Here are some of the challenges to political stability:\n",
928
+ "Economic inequality: Economic inequality can lead to social unrest and instability. This is because it can create a sense of injustice and resentment among those who are not benefiting from economic growth.\n",
929
+ "Corruption: Corruption can undermine the rule of law and erode public trust in government. This can lead to instability and violence.\n",
930
+ "Ethnic and religious conflict: Ethnic and religious conflict can be a major source of instability. This is because it can lead to violence, displacement, and economic disruption.\n",
931
+ "Natural disasters: Natural disasters can also be a source of instability. This is because they can displace people, damage infrastructure, and disrupt economic activity.\n",
932
+ "Despite the challenges, there are many things that can be done to promote political stability. These include:\n",
933
+ "Investing in education and healthcare: Education and healthcare can help to reduce poverty and inequality, which are two of the main causes of instability.\n",
934
+ "Promoting good governance: Good governance is essential for building trust between the government and the people. It can be promoted by strengthening the rule of law, fighting corruption, and ensuring transparency and accountability.\n",
935
+ "Resolving conflict peacefully: Conflict can be resolved peacefully through negotiation, mediation, and other means. This can help to prevent violence and instability.\n",
936
+ "Building resilience: Building resilience is essential for coping with shocks and stresses, such as economic downturns and natural disasters. It can be done by investing in infrastructure, social safety nets, and disaster preparedness.\n",
937
+ "Political stability is a complex issue, but it is essential for economic growth and prosperity. By investing in political stability, we can create a better future for ourselves and our children.'''"
938
+ ]
939
+ },
940
+ {
941
+ "cell_type": "code",
942
+ "execution_count": 65,
943
+ "metadata": {},
944
+ "outputs": [],
945
+ "source": [
946
+ "s1= '''In a quiet, picturesque village nestled deep within the lush, rolling hills of the countryside, there stood a charming, centuries-old cottage, its timeworn facade adorned with colorful flowers that cascaded down from window boxes. The cottage, with its rustic charm, had seen generations come and go, witnessed countless stories unfold within its sturdy walls. Each morning, as the sun cast its golden rays upon the sleepy hamlet, the villagers would wake to the melodious chirping of birds, their cheerful songs serving as a gentle alarm clock. Life in the village was slow-paced, a stark contrast to the bustling cities with their constant noise and ceaseless activity. Time seemed to move differently here, as if the world beyond the village's borders existed in a parallel universe, always in a hurry, while the village embraced a rhythm that ebbed and flowed with the changing seasons. The villagers, bound by a strong sense of community, gathered for festivals, sharing laughter and stories around bonfires that crackled in the cool night air. Generations of families had lived in this idyllic haven, passing down stories, traditions, and the enduring spirit of the village from one age to the next, ensuring that the passage of time only deepened their connection to this place they called home.'''\n"
947
+ ]
948
+ },
949
+ {
950
+ "cell_type": "code",
951
+ "execution_count": 86,
952
+ "metadata": {},
953
+ "outputs": [],
954
+ "source": [
955
+ "s1= \"\"\"The Mission Impossible franchise, a timeless icon in the world of espionage thrillers, has held audiences captive for decades with its electrifying fusion of high-stakes action, intricate espionage plots, and mind-boggling twists. Tom Cruise, a true embodiment of Ethan Hunt, the daring and ingenious secret agent, has forever etched his name alongside the series, leading a team of accomplished operatives on missions that, at first glance, appear insurmountably challenging, destined to thwart global threats. With every new installment, viewers are treated to a whirlwind of meticulously staged action sequences, Tom Cruise's jaw-dropping stunts – performed by the man himself, and a labyrinth of betrayals and double-crosses that keeps everyone on the edge of their seats, leaving them guessing until the very last, suspenseful moments. The franchise's enduring charm stems from its unyielding commitment to pushing cinematic action's boundaries, making sure that each mission remains an impossibly thrilling spectacle that unfolds relentlessly, offering a rollercoaster of excitement for fans of all ages.\"\"\""
956
+ ]
957
+ },
958
+ {
959
+ "cell_type": "code",
960
+ "execution_count": 87,
961
+ "metadata": {},
962
+ "outputs": [
963
+ {
964
+ "data": {
965
+ "text/plain": [
966
+ "['The Mission Impossible franchise, a timeless icon in the world of espionage thrillers, has held audiences captive for decades with its electrifying fusion of high-stakes action, intricate espionage plots, and mind-boggling twists',\n",
967
+ " ' Tom Cruise, a true embodiment of Ethan Hunt, the daring and ingenious secret agent, has forever etched his name alongside the series, leading a team of accomplished operatives on missions that, at first glance, appear insurmountably challenging, destined to thwart global threats',\n",
968
+ " \" With every new installment, viewers are treated to a whirlwind of meticulously staged action sequences, Tom Cruise's jaw-dropping stunts – performed by the man himself, and a labyrinth of betrayals and double-crosses that keeps everyone on the edge of their seats, leaving them guessing until the very last, suspenseful moments\",\n",
969
+ " \" The franchise's enduring charm stems from its unyielding commitment to pushing cinematic action's boundaries, making sure that each mission remains an impossibly thrilling spectacle that unfolds relentlessly, offering a rollercoaster of excitement for fans of all ages\",\n",
970
+ " '']"
971
+ ]
972
+ },
973
+ "execution_count": 87,
974
+ "metadata": {},
975
+ "output_type": "execute_result"
976
+ }
977
+ ],
978
+ "source": [
979
+ "# number of sentences\n",
980
+ "# list_of_sentences1= sentence1.split('.')\n",
981
+ "s1= s1.replace('\\n', ' ')\n",
982
+ "list_of_sentences1= s1.split('.')\n",
983
+ "list_of_sentences1\n",
984
+ "\n",
985
+ "\n",
986
+ "# x= []\n",
987
+ "# for i in list_of_sentences1:\n",
988
+ "# if len(i)>10:\n",
989
+ "# x.append(i)\n",
990
+ " \n",
991
+ "list_of_sentences1"
992
+ ]
993
+ },
994
+ {
995
+ "cell_type": "code",
996
+ "execution_count": 61,
997
+ "metadata": {},
998
+ "outputs": [
999
+ {
1000
+ "data": {
1001
+ "text/plain": [
1002
+ "['Political stability is the ability of a government to maintain order and authority within its borders',\n",
1003
+ " ' It is essential for economic growth, as it provides a foundation for investment and trade',\n",
1004
+ " '\\nThere are many factors that contribute to political stability, including:\\nA strong rule of law: The rule of law is the principle that everyone is subject to the same laws, regardless of their social status or political affiliation',\n",
1005
+ " ' A strong rule of law helps to prevent corruption and ensures that everyone has equal opportunity to succeed',\n",
1006
+ " '\\nA well-functioning government: A well-functioning government is one that is able to provide essential services, such as security, education, and healthcare',\n",
1007
+ " ' It is also able to manage the economy effectively and to respond to crises',\n",
1008
+ " '\\nA vibrant civil society: A vibrant civil society is one that is made up of active and engaged citizens',\n",
1009
+ " ' Civil society organizations can help to hold the government accountable and to promote democracy and good governance',\n",
1010
+ " '\\nPolitical stability is not always easy to achieve, but it is essential for economic growth',\n",
1011
+ " ' By investing in political stability, we can create a foundation for long-term prosperity',\n",
1012
+ " '\\nHere are some of the benefits of political stability:\\nIncreased investment: Investors are more likely to invest in countries that are politically stable',\n",
1013
+ " ' This can lead to increased economic growth and job creation',\n",
1014
+ " '\\nImproved trade: Trade between countries is easier and more efficient when there is political stability',\n",
1015
+ " ' This can lead to lower prices for consumers and increased profits for businesses',\n",
1016
+ " '\\nReduced poverty: Political stability can help to reduce poverty by creating a more conducive environment for economic growth',\n",
1017
+ " '\\nImproved quality of life: Political stability can lead to improved quality of life by providing a safer and more secure environment',\n",
1018
+ " '\\nPolitical stability is a key ingredient for a prosperous and successful society',\n",
1019
+ " ' By investing in political stability, we can create a better future for ourselves and our children',\n",
1020
+ " '\\nHere are some of the challenges to political stability:\\nEconomic inequality: Economic inequality can lead to social unrest and instability',\n",
1021
+ " ' This is because it can create a sense of injustice and resentment among those who are not benefiting from economic growth',\n",
1022
+ " '\\nCorruption: Corruption can undermine the rule of law and erode public trust in government',\n",
1023
+ " ' This can lead to instability and violence',\n",
1024
+ " '\\nEthnic and religious conflict: Ethnic and religious conflict can be a major source of instability',\n",
1025
+ " ' This is because it can lead to violence, displacement, and economic disruption',\n",
1026
+ " '\\nNatural disasters: Natural disasters can also be a source of instability',\n",
1027
+ " ' This is because they can displace people, damage infrastructure, and disrupt economic activity',\n",
1028
+ " '\\nDespite the challenges, there are many things that can be done to promote political stability',\n",
1029
+ " ' These include:\\nInvesting in education and healthcare: Education and healthcare can help to reduce poverty and inequality, which are two of the main causes of instability',\n",
1030
+ " '\\nPromoting good governance: Good governance is essential for building trust between the government and the people',\n",
1031
+ " ' It can be promoted by strengthening the rule of law, fighting corruption, and ensuring transparency and accountability',\n",
1032
+ " '\\nResolving conflict peacefully: Conflict can be resolved peacefully through negotiation, mediation, and other means',\n",
1033
+ " ' This can help to prevent violence and instability',\n",
1034
+ " '\\nBuilding resilience: Building resilience is essential for coping with shocks and stresses, such as economic downturns and natural disasters',\n",
1035
+ " ' It can be done by investing in infrastructure, social safety nets, and disaster preparedness',\n",
1036
+ " '\\nPolitical stability is a complex issue, but it is essential for economic growth and prosperity',\n",
1037
+ " ' By investing in political stability, we can create a better future for ourselves and our children',\n",
1038
+ " '']"
1039
+ ]
1040
+ },
1041
+ "execution_count": 61,
1042
+ "metadata": {},
1043
+ "output_type": "execute_result"
1044
+ }
1045
+ ],
1046
+ "source": [
1047
+ "# list_of_sentences2= sentence2.split('.')\n",
1048
+ "# s2= s2.replace('\\n', ' ')\n",
1049
+ "\n",
1050
+ "list_of_sentences2= s2.split('.')\n",
1051
+ "list_of_sentences2"
1052
+ ]
1053
+ },
1054
+ {
1055
+ "cell_type": "code",
1056
+ "execution_count": null,
1057
+ "metadata": {},
1058
+ "outputs": [],
1059
+ "source": []
1060
+ },
1061
+ {
1062
+ "cell_type": "code",
1063
+ "execution_count": null,
1064
+ "metadata": {},
1065
+ "outputs": [],
1066
+ "source": []
1067
+ },
1068
+ {
1069
+ "cell_type": "code",
1070
+ "execution_count": 32,
1071
+ "metadata": {},
1072
+ "outputs": [],
1073
+ "source": [
1074
+ "import numpy as np\n",
1075
+ "def calculate_burst(list_of_sentences):\n",
1076
+ " arr= []\n",
1077
+ " for i in list_of_sentences:\n",
1078
+ " ei= tokenizer(i, return_tensors=\"pt\")\n",
1079
+ " arr.append(ei.input_ids.size(1))\n",
1080
+ " print(f\"arr= {(arr)}\")\n",
1081
+ " return np.var(np.array(arr))\n",
1082
+ " "
1083
+ ]
1084
+ },
1085
+ {
1086
+ "cell_type": "code",
1087
+ "execution_count": 78,
1088
+ "metadata": {},
1089
+ "outputs": [
1090
+ {
1091
+ "name": "stdout",
1092
+ "output_type": "stream",
1093
+ "text": [
1094
+ "arr= [41, 27, 24, 37, 24, 28, 42, 35, 24, 11, 23, 21, 29, 40]\n"
1095
+ ]
1096
+ },
1097
+ {
1098
+ "data": {
1099
+ "text/plain": [
1100
+ "74.14285714285714"
1101
+ ]
1102
+ },
1103
+ "execution_count": 78,
1104
+ "metadata": {},
1105
+ "output_type": "execute_result"
1106
+ }
1107
+ ],
1108
+ "source": [
1109
+ "calculate_burst(list_of_sentences1[:-1])"
1110
+ ]
1111
+ },
1112
+ {
1113
+ "cell_type": "code",
1114
+ "execution_count": 63,
1115
+ "metadata": {},
1116
+ "outputs": [
1117
+ {
1118
+ "name": "stdout",
1119
+ "output_type": "stream",
1120
+ "text": [
1121
+ "arr= [16, 16, 44, 18, 32, 14, 21, 17, 17, 16, 27, 10, 17, 13, 21, 23, 13, 17, 24, 21, 18, 7, 18, 14, 13, 16, 17, 31, 19, 19, 19, 8, 23, 17, 17, 17]\n"
1122
+ ]
1123
+ },
1124
+ {
1125
+ "data": {
1126
+ "text/plain": [
1127
+ "45.57098765432099"
1128
+ ]
1129
+ },
1130
+ "execution_count": 63,
1131
+ "metadata": {},
1132
+ "output_type": "execute_result"
1133
+ }
1134
+ ],
1135
+ "source": [
1136
+ "calculate_burst(list_of_sentences2[:-1])"
1137
+ ]
1138
+ },
1139
+ {
1140
+ "cell_type": "code",
1141
+ "execution_count": null,
1142
+ "metadata": {},
1143
+ "outputs": [],
1144
+ "source": []
1145
+ },
1146
+ {
1147
+ "cell_type": "markdown",
1148
+ "metadata": {},
1149
+ "source": [
1150
+ "# Method 3: `Summarization via Language Model`\n"
1151
+ ]
1152
+ },
1153
+ {
1154
+ "cell_type": "code",
1155
+ "execution_count": 22,
1156
+ "metadata": {},
1157
+ "outputs": [],
1158
+ "source": [
1159
+ "# Use a pipeline as a high-level helper\n",
1160
+ "from transformers import pipeline\n",
1161
+ "\n",
1162
+ "pipe = pipeline(\"summarization\", model=\"DunnBC22/flan-t5-base-text_summarization_data\", device=\"cuda\")"
1163
+ ]
1164
+ },
1165
+ {
1166
+ "cell_type": "code",
1167
+ "execution_count": 23,
1168
+ "metadata": {},
1169
+ "outputs": [
1170
+ {
1171
+ "data": {
1172
+ "text/plain": [
1173
+ "[{'summary_text': 'Understand water intoxication. Recognize water poisoning. Understand the dangers of excessive water intake. Understand how water can be considered a poison. Understand that water is considered one of the least toxic chemical compounds, with an LD50 exceeding 90 ml/kg in rats.'},\n",
1174
+ " {'summary_text': 'Understand hyponatremia. Recognize the dangers of aqueous intoxication. Understand the causes of aqua inebriation. Identify the cause of venomous aquious content. Describe the effects of water on human health.'}]"
1175
+ ]
1176
+ },
1177
+ "execution_count": 23,
1178
+ "metadata": {},
1179
+ "output_type": "execute_result"
1180
+ }
1181
+ ],
1182
+ "source": [
1183
+ "pipe([sentence1, sentence2], min_length=50, max_length=200)"
1184
+ ]
1185
+ },
1186
+ {
1187
+ "cell_type": "code",
1188
+ "execution_count": 17,
1189
+ "metadata": {},
1190
+ "outputs": [],
1191
+ "source": [
1192
+ "# # Use a pipeline as a high-level helper\n",
1193
+ "# from transformers import pipeline\n",
1194
+ "\n",
1195
+ "# pipe = pipeline(\"summarization\", model=\"google/pegasus-cnn_dailymail\", device=\"cuda\")"
1196
+ ]
1197
+ },
1198
+ {
1199
+ "cell_type": "code",
1200
+ "execution_count": null,
1201
+ "metadata": {},
1202
+ "outputs": [
1203
+ {
1204
+ "data": {
1205
+ "text/plain": [
1206
+ "[{'summary_text': 'Cat ear headphones are popular among otakus, streamers, gamers, and anyone who wants a uniquely cute look .<n>Design is the priority, but sound quality should also be considered .'},\n",
1207
+ " {'summary_text': 'Feline-themed audio headgear enjoys favor among aficionados of anime and gaming, as well as content creators .<n>Seek out headphones delivering crystal-clear and precise audio, and assess their suitability for both mature users and youngsters'}]"
1208
+ ]
1209
+ },
1210
+ "execution_count": 4,
1211
+ "metadata": {},
1212
+ "output_type": "execute_result"
1213
+ }
1214
+ ],
1215
+ "source": [
1216
+ "# pipe([sentence1, sentence2], min_length=50, max_length=200)"
1217
+ ]
1218
+ },
1219
+ {
1220
+ "cell_type": "code",
1221
+ "execution_count": null,
1222
+ "metadata": {},
1223
+ "outputs": [],
1224
+ "source": []
1225
+ }
1226
+ ],
1227
+ "metadata": {
1228
+ "kernelspec": {
1229
+ "display_name": "venv",
1230
+ "language": "python",
1231
+ "name": "python3"
1232
+ },
1233
+ "language_info": {
1234
+ "codemirror_mode": {
1235
+ "name": "ipython",
1236
+ "version": 3
1237
+ },
1238
+ "file_extension": ".py",
1239
+ "mimetype": "text/x-python",
1240
+ "name": "python",
1241
+ "nbconvert_exporter": "python",
1242
+ "pygments_lexer": "ipython3",
1243
+ "version": "3.10.12"
1244
+ },
1245
+ "orig_nbformat": 4
1246
+ },
1247
+ "nbformat": 4,
1248
+ "nbformat_minor": 2
1249
+ }
research/02_dl_Ai_checker.ipynb ADDED
@@ -0,0 +1,1220 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os; os.chdir('..');"
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 3,
15
+ "metadata": {},
16
+ "outputs": [],
17
+ "source": [
18
+ "from datasets import Dataset, load_dataset"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": 4,
24
+ "metadata": {},
25
+ "outputs": [
26
+ {
27
+ "name": "stderr",
28
+ "output_type": "stream",
29
+ "text": [
30
+ "Downloading readme: 100%|██████████| 2.63k/2.63k [00:00<00:00, 19.4MB/s]\n",
31
+ "Downloading data: 100%|██████████| 127M/127M [00:04<00:00, 30.3MB/s]\n",
32
+ "Downloading data files: 100%|██████████| 1/1 [00:04<00:00, 4.21s/it]\n",
33
+ "Extracting data files: 100%|██████████| 1/1 [00:02<00:00, 2.09s/it]\n",
34
+ "Generating train split: 150000 examples [00:04, 30015.64 examples/s]\n"
35
+ ]
36
+ }
37
+ ],
38
+ "source": [
39
+ "dataset_name= \"aadityaubhat/GPT-wiki-intro\"\n",
40
+ "\n",
41
+ "\n",
42
+ "\n",
43
+ "dataset= load_dataset(dataset_name)"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "code",
48
+ "execution_count": 5,
49
+ "metadata": {},
50
+ "outputs": [
51
+ {
52
+ "data": {
53
+ "text/plain": [
54
+ "DatasetDict({\n",
55
+ " train: Dataset({\n",
56
+ " features: ['id', 'url', 'title', 'wiki_intro', 'generated_intro', 'title_len', 'wiki_intro_len', 'generated_intro_len', 'prompt', 'generated_text', 'prompt_tokens', 'generated_text_tokens'],\n",
57
+ " num_rows: 150000\n",
58
+ " })\n",
59
+ "})"
60
+ ]
61
+ },
62
+ "execution_count": 5,
63
+ "metadata": {},
64
+ "output_type": "execute_result"
65
+ }
66
+ ],
67
+ "source": [
68
+ "dataset"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": 8,
74
+ "metadata": {},
75
+ "outputs": [
76
+ {
77
+ "name": "stderr",
78
+ "output_type": "stream",
79
+ "text": [
80
+ "Creating CSV from Arrow format: 100%|██████████| 150/150 [00:08<00:00, 18.69ba/s]\n"
81
+ ]
82
+ },
83
+ {
84
+ "data": {
85
+ "text/plain": [
86
+ "443537732"
87
+ ]
88
+ },
89
+ "execution_count": 8,
90
+ "metadata": {},
91
+ "output_type": "execute_result"
92
+ }
93
+ ],
94
+ "source": [
95
+ "dataset['train'].to_csv(\"data/original_data.csv\")"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": 2,
101
+ "metadata": {},
102
+ "outputs": [
103
+ {
104
+ "data": {
105
+ "text/html": [
106
+ "<div>\n",
107
+ "<style scoped>\n",
108
+ " .dataframe tbody tr th:only-of-type {\n",
109
+ " vertical-align: middle;\n",
110
+ " }\n",
111
+ "\n",
112
+ " .dataframe tbody tr th {\n",
113
+ " vertical-align: top;\n",
114
+ " }\n",
115
+ "\n",
116
+ " .dataframe thead th {\n",
117
+ " text-align: right;\n",
118
+ " }\n",
119
+ "</style>\n",
120
+ "<table border=\"1\" class=\"dataframe\">\n",
121
+ " <thead>\n",
122
+ " <tr style=\"text-align: right;\">\n",
123
+ " <th></th>\n",
124
+ " <th>id</th>\n",
125
+ " <th>url</th>\n",
126
+ " <th>title</th>\n",
127
+ " <th>wiki_intro</th>\n",
128
+ " <th>generated_intro</th>\n",
129
+ " <th>title_len</th>\n",
130
+ " <th>wiki_intro_len</th>\n",
131
+ " <th>generated_intro_len</th>\n",
132
+ " <th>prompt</th>\n",
133
+ " <th>generated_text</th>\n",
134
+ " <th>prompt_tokens</th>\n",
135
+ " <th>generated_text_tokens</th>\n",
136
+ " </tr>\n",
137
+ " </thead>\n",
138
+ " <tbody>\n",
139
+ " <tr>\n",
140
+ " <th>0</th>\n",
141
+ " <td>63064638</td>\n",
142
+ " <td>https://en.wikipedia.org/wiki/Sexhow%20railway...</td>\n",
143
+ " <td>Sexhow railway station</td>\n",
144
+ " <td>Sexhow railway station was a railway station b...</td>\n",
145
+ " <td>Sexhow railway station was a railway station l...</td>\n",
146
+ " <td>3</td>\n",
147
+ " <td>174</td>\n",
148
+ " <td>78</td>\n",
149
+ " <td>200 word wikipedia style introduction on 'Sexh...</td>\n",
150
+ " <td>located in the town of Sexhow, on the Cumbria...</td>\n",
151
+ " <td>25</td>\n",
152
+ " <td>88</td>\n",
153
+ " </tr>\n",
154
+ " <tr>\n",
155
+ " <th>1</th>\n",
156
+ " <td>279621</td>\n",
157
+ " <td>https://en.wikipedia.org/wiki/Eti%C3%A4inen</td>\n",
158
+ " <td>Etiäinen</td>\n",
159
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
160
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
161
+ " <td>1</td>\n",
162
+ " <td>187</td>\n",
163
+ " <td>80</td>\n",
164
+ " <td>200 word wikipedia style introduction on 'Etiä...</td>\n",
165
+ " <td>animate or inanimate, have a spirit or \"etiäi...</td>\n",
166
+ " <td>26</td>\n",
167
+ " <td>101</td>\n",
168
+ " </tr>\n",
169
+ " <tr>\n",
170
+ " <th>2</th>\n",
171
+ " <td>287229</td>\n",
172
+ " <td>https://en.wikipedia.org/wiki/Inverse%20functi...</td>\n",
173
+ " <td>Inverse function theorem</td>\n",
174
+ " <td>In mathematics, specifically differential calc...</td>\n",
175
+ " <td>In mathematics, specifically differential calc...</td>\n",
176
+ " <td>3</td>\n",
177
+ " <td>170</td>\n",
178
+ " <td>59</td>\n",
179
+ " <td>200 word wikipedia style introduction on 'Inve...</td>\n",
180
+ " <td>function theorem states that for every real-v...</td>\n",
181
+ " <td>26</td>\n",
182
+ " <td>65</td>\n",
183
+ " </tr>\n",
184
+ " <tr>\n",
185
+ " <th>3</th>\n",
186
+ " <td>26712375</td>\n",
187
+ " <td>https://en.wikipedia.org/wiki/Stepping%20on%20...</td>\n",
188
+ " <td>Stepping on Roses</td>\n",
189
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
190
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
191
+ " <td>3</td>\n",
192
+ " <td>335</td>\n",
193
+ " <td>121</td>\n",
194
+ " <td>200 word wikipedia style introduction on 'Step...</td>\n",
195
+ " <td>and illustrated by Maki Fujii. The series fol...</td>\n",
196
+ " <td>26</td>\n",
197
+ " <td>150</td>\n",
198
+ " </tr>\n",
199
+ " <tr>\n",
200
+ " <th>4</th>\n",
201
+ " <td>38894426</td>\n",
202
+ " <td>https://en.wikipedia.org/wiki/Rob%20Bradley</td>\n",
203
+ " <td>Rob Bradley</td>\n",
204
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
205
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
206
+ " <td>2</td>\n",
207
+ " <td>170</td>\n",
208
+ " <td>136</td>\n",
209
+ " <td>200 word wikipedia style introduction on 'Rob ...</td>\n",
210
+ " <td>29, 1973) is an American former professional ...</td>\n",
211
+ " <td>28</td>\n",
212
+ " <td>162</td>\n",
213
+ " </tr>\n",
214
+ " </tbody>\n",
215
+ "</table>\n",
216
+ "</div>"
217
+ ],
218
+ "text/plain": [
219
+ " id url \\\n",
220
+ "0 63064638 https://en.wikipedia.org/wiki/Sexhow%20railway... \n",
221
+ "1 279621 https://en.wikipedia.org/wiki/Eti%C3%A4inen \n",
222
+ "2 287229 https://en.wikipedia.org/wiki/Inverse%20functi... \n",
223
+ "3 26712375 https://en.wikipedia.org/wiki/Stepping%20on%20... \n",
224
+ "4 38894426 https://en.wikipedia.org/wiki/Rob%20Bradley \n",
225
+ "\n",
226
+ " title \\\n",
227
+ "0 Sexhow railway station \n",
228
+ "1 Etiäinen \n",
229
+ "2 Inverse function theorem \n",
230
+ "3 Stepping on Roses \n",
231
+ "4 Rob Bradley \n",
232
+ "\n",
233
+ " wiki_intro \\\n",
234
+ "0 Sexhow railway station was a railway station b... \n",
235
+ "1 In Finnish folklore, all places and things, an... \n",
236
+ "2 In mathematics, specifically differential calc... \n",
237
+ "3 is a Japanese shōjo manga series written and i... \n",
238
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... \n",
239
+ "\n",
240
+ " generated_intro title_len \\\n",
241
+ "0 Sexhow railway station was a railway station l... 3 \n",
242
+ "1 In Finnish folklore, all places and things, an... 1 \n",
243
+ "2 In mathematics, specifically differential calc... 3 \n",
244
+ "3 is a Japanese shōjo manga series written and i... 3 \n",
245
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... 2 \n",
246
+ "\n",
247
+ " wiki_intro_len generated_intro_len \\\n",
248
+ "0 174 78 \n",
249
+ "1 187 80 \n",
250
+ "2 170 59 \n",
251
+ "3 335 121 \n",
252
+ "4 170 136 \n",
253
+ "\n",
254
+ " prompt \\\n",
255
+ "0 200 word wikipedia style introduction on 'Sexh... \n",
256
+ "1 200 word wikipedia style introduction on 'Etiä... \n",
257
+ "2 200 word wikipedia style introduction on 'Inve... \n",
258
+ "3 200 word wikipedia style introduction on 'Step... \n",
259
+ "4 200 word wikipedia style introduction on 'Rob ... \n",
260
+ "\n",
261
+ " generated_text prompt_tokens \\\n",
262
+ "0 located in the town of Sexhow, on the Cumbria... 25 \n",
263
+ "1 animate or inanimate, have a spirit or \"etiäi... 26 \n",
264
+ "2 function theorem states that for every real-v... 26 \n",
265
+ "3 and illustrated by Maki Fujii. The series fol... 26 \n",
266
+ "4 29, 1973) is an American former professional ... 28 \n",
267
+ "\n",
268
+ " generated_text_tokens \n",
269
+ "0 88 \n",
270
+ "1 101 \n",
271
+ "2 65 \n",
272
+ "3 150 \n",
273
+ "4 162 "
274
+ ]
275
+ },
276
+ "execution_count": 2,
277
+ "metadata": {},
278
+ "output_type": "execute_result"
279
+ }
280
+ ],
281
+ "source": [
282
+ "import pandas as pd\n",
283
+ "\n",
284
+ "df= pd.read_csv(\"data/original_data.csv\")\n",
285
+ "df.head()"
286
+ ]
287
+ },
288
+ {
289
+ "cell_type": "code",
290
+ "execution_count": 3,
291
+ "metadata": {},
292
+ "outputs": [
293
+ {
294
+ "data": {
295
+ "text/html": [
296
+ "<div>\n",
297
+ "<style scoped>\n",
298
+ " .dataframe tbody tr th:only-of-type {\n",
299
+ " vertical-align: middle;\n",
300
+ " }\n",
301
+ "\n",
302
+ " .dataframe tbody tr th {\n",
303
+ " vertical-align: top;\n",
304
+ " }\n",
305
+ "\n",
306
+ " .dataframe thead th {\n",
307
+ " text-align: right;\n",
308
+ " }\n",
309
+ "</style>\n",
310
+ "<table border=\"1\" class=\"dataframe\">\n",
311
+ " <thead>\n",
312
+ " <tr style=\"text-align: right;\">\n",
313
+ " <th></th>\n",
314
+ " <th>id</th>\n",
315
+ " <th>url</th>\n",
316
+ " <th>title</th>\n",
317
+ " <th>wiki_intro</th>\n",
318
+ " <th>generated_intro</th>\n",
319
+ " <th>title_len</th>\n",
320
+ " <th>wiki_intro_len</th>\n",
321
+ " <th>generated_intro_len</th>\n",
322
+ " <th>prompt</th>\n",
323
+ " <th>generated_text</th>\n",
324
+ " <th>prompt_tokens</th>\n",
325
+ " <th>generated_text_tokens</th>\n",
326
+ " </tr>\n",
327
+ " </thead>\n",
328
+ " <tbody>\n",
329
+ " <tr>\n",
330
+ " <th>5</th>\n",
331
+ " <td>26709147</td>\n",
332
+ " <td>https://en.wikipedia.org/wiki/Moluccans</td>\n",
333
+ " <td>Moluccans</td>\n",
334
+ " <td>Moluccans are the Austronesian-speaking and Pa...</td>\n",
335
+ " <td>Moluccans are the Austronesian-speaking and Pa...</td>\n",
336
+ " <td>1</td>\n",
337
+ " <td>253</td>\n",
338
+ " <td>164</td>\n",
339
+ " <td>200 word wikipedia style introduction on 'Molu...</td>\n",
340
+ " <td>groups inhabiting the Maluku Islands. The ter...</td>\n",
341
+ " <td>33</td>\n",
342
+ " <td>238</td>\n",
343
+ " </tr>\n",
344
+ " </tbody>\n",
345
+ "</table>\n",
346
+ "</div>"
347
+ ],
348
+ "text/plain": [
349
+ " id url title \\\n",
350
+ "5 26709147 https://en.wikipedia.org/wiki/Moluccans Moluccans \n",
351
+ "\n",
352
+ " wiki_intro \\\n",
353
+ "5 Moluccans are the Austronesian-speaking and Pa... \n",
354
+ "\n",
355
+ " generated_intro title_len \\\n",
356
+ "5 Moluccans are the Austronesian-speaking and Pa... 1 \n",
357
+ "\n",
358
+ " wiki_intro_len generated_intro_len \\\n",
359
+ "5 253 164 \n",
360
+ "\n",
361
+ " prompt \\\n",
362
+ "5 200 word wikipedia style introduction on 'Molu... \n",
363
+ "\n",
364
+ " generated_text prompt_tokens \\\n",
365
+ "5 groups inhabiting the Maluku Islands. The ter... 33 \n",
366
+ "\n",
367
+ " generated_text_tokens \n",
368
+ "5 238 "
369
+ ]
370
+ },
371
+ "execution_count": 3,
372
+ "metadata": {},
373
+ "output_type": "execute_result"
374
+ }
375
+ ],
376
+ "source": [
377
+ "# df_0= df.head(1)\n",
378
+ "# df_0= df.iloc[1] # series result \n",
379
+ "df_0= df.iloc[[5]] # dataframe result\n",
380
+ "\n",
381
+ "df_0"
382
+ ]
383
+ },
384
+ {
385
+ "cell_type": "code",
386
+ "execution_count": 4,
387
+ "metadata": {},
388
+ "outputs": [
389
+ {
390
+ "name": "stdout",
391
+ "output_type": "stream",
392
+ "text": [
393
+ "title= ['Moluccans']\n",
394
+ "\n",
395
+ "wiki_intro= ['Moluccans are the Austronesian-speaking and Papuan-speaking ethnic groups indigenous to the Maluku Islands, also called the Moluccas and historically known as the Spice Islands, which as a region has been annexed by Indonesia since the end of 1950. As such, \"Moluccans\" is used as a blanket term for the various ethnic and linguistic groups native to the islands.\\n\\t\\nThe original inhabitants of the Maluku Islands were Austronesian and Melanesian in origin. Austronesian peoples partially assimilated the native Melanesian population in terms of linguistics and other areas, around 2000\\xa0BCE due to extensive trade, making Malayo-Polynesian creole languages the lingua franca in most of the region. Later added to this were several Dutch, Chinese, Portuguese, Spanish, Arabian and English influences due to colonization, marriage with foreign traders during the Silk-route era and Middle ages, and even with European soldiers during the World Wars. A small number of German descendants was added to Moluccan population, especially in Ambon, along with arrival of Protestant Missionaries since 16th century. Moluccans are predominantly Christian like many other Melanesians, but Muslim villages are also present. Despite religious differences, all groups share strong cultural bonds and a sense of common identity, such as through Adat, Pela and Bongso traditions. Music is also a binding factor, playing an important role in the cultural identity. Moluccans historically tend to be a musically gifted people, excelling in creative areas such as singing and sports. In recognition, the Moluccan capital city of Ambon was awarded the official status of City of Music by UNESCO in 2019.']\n",
396
+ "\n",
397
+ "generated_intro= ['Moluccans are the Austronesian-speaking and Papuan-speaking ethnic groups inhabiting the Maluku Islands. The term \"Moluccan\" is an umbrella term that covers the various Austronesian and Papuan languages spoken on the islands. The largest group of Moluccans are the Tolo-speaking people.\\n\\nThe Maluku Islands are a group of volcanic islands in eastern Indonesia, located about 1,000 kilometres east of Java and 2,000 kilometres south of New Guinea. The islands comprise over 700 islands, with a total land area of approximately 245,000 square kilometres. They have a population of around 1 million people, most of whom are Muslim. The largest island group is the Tolo-speaking region, which comprises over 60% of the population.\\n\\nThe Moluccas were first explored by Europeans in 1512. Portuguese explorer João da Nova discovered Ternate and Tidore, while Dutch explorer Jacob Roggeveen discovered Halmahera and Bougainville. The Moluccas were later visited by Spanish explorer Ferdinand Magellan in 1521. British explorer James Cook visited Ternate, Tidore, Halmahera, and Bougainville during his first voyage in 1770–71.']\n",
398
+ "\n",
399
+ "prompt= [\"200 word wikipedia style introduction on 'Moluccans'\\n Moluccans are the Austronesian-speaking and Papuan-speaking ethnic\"]\n",
400
+ "\n",
401
+ "generated_text= [' groups inhabiting the Maluku Islands. The term \"Moluccan\" is an umbrella term that covers the various Austronesian and Papuan languages spoken on the islands. The largest group of Moluccans are the Tolo-speaking people.\\n\\nThe Maluku Islands are a group of volcanic islands in eastern Indonesia, located about 1,000 kilometres east of Java and 2,000 kilometres south of New Guinea. The islands comprise over 700 islands, with a total land area of approximately 245,000 square kilometres. They have a population of around 1 million people, most of whom are Muslim. The largest island group is the Tolo-speaking region, which comprises over 60% of the population.\\n\\nThe Moluccas were first explored by Europeans in 1512. Portuguese explorer João da Nova discovered Ternate and Tidore, while Dutch explorer Jacob Roggeveen discovered Halmahera and Bougainville. The Moluccas were later visited by Spanish explorer Ferdinand Magellan in 1521. British explorer James Cook visited Ternate, Tidore, Halmahera, and Bougainville during his first voyage in 1770–71.']\n"
402
+ ]
403
+ }
404
+ ],
405
+ "source": [
406
+ "print(f\"title= {df_0.title.values}\")\n",
407
+ "print()\n",
408
+ "print(f\"wiki_intro= {df_0.wiki_intro.values}\")\n",
409
+ "print()\n",
410
+ "print(f\"generated_intro= {df_0.generated_intro.values}\") # prompt {7 tokens} + generated_text\n",
411
+ "print()\n",
412
+ "print(f\"prompt= {df_0.prompt.values}\")\n",
413
+ "print()\n",
414
+ "print(f\"generated_text= {df_0.generated_text.values}\")\n"
415
+ ]
416
+ },
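A minimal sanity check of that column relationship, based only on the row printed above: `generated_intro` should equal the prompt's seed sentence (the text after the instruction line) followed by the model completion. This is a sketch, not part of the original notebook; `df_0` is the single-row slice used in this cell, and whitespace conventions may differ on other rows.

```python
# Check: generated_intro == seed sentence from the prompt + generated_text.
seed = df_0.prompt.values[0].split("\n")[-1].strip()  # text after the instruction line
completion = df_0.generated_text.values[0]
intro = df_0.generated_intro.values[0]
print(intro == seed + completion)  # True for this row; whitespace may vary elsewhere
```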
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": 5,
420
+ "metadata": {},
421
+ "outputs": [
422
+ {
423
+ "data": {
424
+ "text/html": [
425
+ "<div>\n",
426
+ "<style scoped>\n",
427
+ " .dataframe tbody tr th:only-of-type {\n",
428
+ " vertical-align: middle;\n",
429
+ " }\n",
430
+ "\n",
431
+ " .dataframe tbody tr th {\n",
432
+ " vertical-align: top;\n",
433
+ " }\n",
434
+ "\n",
435
+ " .dataframe thead th {\n",
436
+ " text-align: right;\n",
437
+ " }\n",
438
+ "</style>\n",
439
+ "<table border=\"1\" class=\"dataframe\">\n",
440
+ " <thead>\n",
441
+ " <tr style=\"text-align: right;\">\n",
442
+ " <th></th>\n",
443
+ " <th>Paragraph</th>\n",
444
+ " <th>AI_generated</th>\n",
445
+ " </tr>\n",
446
+ " </thead>\n",
447
+ " <tbody>\n",
448
+ " <tr>\n",
449
+ " <th>0</th>\n",
450
+ " <td>Sexhow railway station was a railway station b...</td>\n",
451
+ " <td>0</td>\n",
452
+ " </tr>\n",
453
+ " <tr>\n",
454
+ " <th>1</th>\n",
455
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
456
+ " <td>0</td>\n",
457
+ " </tr>\n",
458
+ " <tr>\n",
459
+ " <th>2</th>\n",
460
+ " <td>In mathematics, specifically differential calc...</td>\n",
461
+ " <td>0</td>\n",
462
+ " </tr>\n",
463
+ " <tr>\n",
464
+ " <th>3</th>\n",
465
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
466
+ " <td>0</td>\n",
467
+ " </tr>\n",
468
+ " <tr>\n",
469
+ " <th>4</th>\n",
470
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
471
+ " <td>0</td>\n",
472
+ " </tr>\n",
473
+ " <tr>\n",
474
+ " <th>...</th>\n",
475
+ " <td>...</td>\n",
476
+ " <td>...</td>\n",
477
+ " </tr>\n",
478
+ " <tr>\n",
479
+ " <th>149995</th>\n",
480
+ " <td>Randy Borum is a Professor and Coordinator of ...</td>\n",
481
+ " <td>0</td>\n",
482
+ " </tr>\n",
483
+ " <tr>\n",
484
+ " <th>149996</th>\n",
485
+ " <td>Sa'och (, also, \"Sauch\") is an endangered, nea...</td>\n",
486
+ " <td>0</td>\n",
487
+ " </tr>\n",
488
+ " <tr>\n",
489
+ " <th>149997</th>\n",
490
+ " <td>Philip C. Hanawalt (born 1931) is an American ...</td>\n",
491
+ " <td>0</td>\n",
492
+ " </tr>\n",
493
+ " <tr>\n",
494
+ " <th>149998</th>\n",
495
+ " <td>Vossius Gymnasium is a public gymnasium in Ams...</td>\n",
496
+ " <td>0</td>\n",
497
+ " </tr>\n",
498
+ " <tr>\n",
499
+ " <th>149999</th>\n",
500
+ " <td>Simone Stratigo (, Symeon Filippos Stratigos; ...</td>\n",
501
+ " <td>0</td>\n",
502
+ " </tr>\n",
503
+ " </tbody>\n",
504
+ "</table>\n",
505
+ "<p>150000 rows × 2 columns</p>\n",
506
+ "</div>"
507
+ ],
508
+ "text/plain": [
509
+ " Paragraph AI_generated\n",
510
+ "0 Sexhow railway station was a railway station b... 0\n",
511
+ "1 In Finnish folklore, all places and things, an... 0\n",
512
+ "2 In mathematics, specifically differential calc... 0\n",
513
+ "3 is a Japanese shōjo manga series written and i... 0\n",
514
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... 0\n",
515
+ "... ... ...\n",
516
+ "149995 Randy Borum is a Professor and Coordinator of ... 0\n",
517
+ "149996 Sa'och (, also, \"Sauch\") is an endangered, nea... 0\n",
518
+ "149997 Philip C. Hanawalt (born 1931) is an American ... 0\n",
519
+ "149998 Vossius Gymnasium is a public gymnasium in Ams... 0\n",
520
+ "149999 Simone Stratigo (, Symeon Filippos Stratigos; ... 0\n",
521
+ "\n",
522
+ "[150000 rows x 2 columns]"
523
+ ]
524
+ },
525
+ "execution_count": 5,
526
+ "metadata": {},
527
+ "output_type": "execute_result"
528
+ }
529
+ ],
530
+ "source": [
531
+ "new_df_human= pd.DataFrame({\n",
532
+ " \"Paragraph\": df.wiki_intro, \n",
533
+ " \"AI_generated\": pd.Series([0]*len(df))\n",
534
+ " \n",
535
+ " }\n",
536
+ ")\n",
537
+ "new_df_human"
538
+ ]
539
+ },
540
+ {
541
+ "cell_type": "code",
542
+ "execution_count": 6,
543
+ "metadata": {},
544
+ "outputs": [
545
+ {
546
+ "data": {
547
+ "text/html": [
548
+ "<div>\n",
549
+ "<style scoped>\n",
550
+ " .dataframe tbody tr th:only-of-type {\n",
551
+ " vertical-align: middle;\n",
552
+ " }\n",
553
+ "\n",
554
+ " .dataframe tbody tr th {\n",
555
+ " vertical-align: top;\n",
556
+ " }\n",
557
+ "\n",
558
+ " .dataframe thead th {\n",
559
+ " text-align: right;\n",
560
+ " }\n",
561
+ "</style>\n",
562
+ "<table border=\"1\" class=\"dataframe\">\n",
563
+ " <thead>\n",
564
+ " <tr style=\"text-align: right;\">\n",
565
+ " <th></th>\n",
566
+ " <th>Paragraph</th>\n",
567
+ " <th>AI_generated</th>\n",
568
+ " </tr>\n",
569
+ " </thead>\n",
570
+ " <tbody>\n",
571
+ " <tr>\n",
572
+ " <th>0</th>\n",
573
+ " <td>Sexhow railway station was a railway station l...</td>\n",
574
+ " <td>1</td>\n",
575
+ " </tr>\n",
576
+ " <tr>\n",
577
+ " <th>1</th>\n",
578
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
579
+ " <td>1</td>\n",
580
+ " </tr>\n",
581
+ " <tr>\n",
582
+ " <th>2</th>\n",
583
+ " <td>In mathematics, specifically differential calc...</td>\n",
584
+ " <td>1</td>\n",
585
+ " </tr>\n",
586
+ " <tr>\n",
587
+ " <th>3</th>\n",
588
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
589
+ " <td>1</td>\n",
590
+ " </tr>\n",
591
+ " <tr>\n",
592
+ " <th>4</th>\n",
593
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
594
+ " <td>1</td>\n",
595
+ " </tr>\n",
596
+ " <tr>\n",
597
+ " <th>...</th>\n",
598
+ " <td>...</td>\n",
599
+ " <td>...</td>\n",
600
+ " </tr>\n",
601
+ " <tr>\n",
602
+ " <th>149995</th>\n",
603
+ " <td>Randy Borum is a Professor and Coordinator of ...</td>\n",
604
+ " <td>1</td>\n",
605
+ " </tr>\n",
606
+ " <tr>\n",
607
+ " <th>149996</th>\n",
608
+ " <td>Sa'och (, also, \"Sauch\") is an endangered, nuc...</td>\n",
609
+ " <td>1</td>\n",
610
+ " </tr>\n",
611
+ " <tr>\n",
612
+ " <th>149997</th>\n",
613
+ " <td>Philip C. Hanawalt (born 1931) is an American ...</td>\n",
614
+ " <td>1</td>\n",
615
+ " </tr>\n",
616
+ " <tr>\n",
617
+ " <th>149998</th>\n",
618
+ " <td>Vossius Gymnasium is a public gymnasium in the...</td>\n",
619
+ " <td>1</td>\n",
620
+ " </tr>\n",
621
+ " <tr>\n",
622
+ " <th>149999</th>\n",
623
+ " <td>Simone Stratigo (, Symeon Filippos Stratigos; ...</td>\n",
624
+ " <td>1</td>\n",
625
+ " </tr>\n",
626
+ " </tbody>\n",
627
+ "</table>\n",
628
+ "<p>150000 rows × 2 columns</p>\n",
629
+ "</div>"
630
+ ],
631
+ "text/plain": [
632
+ " Paragraph AI_generated\n",
633
+ "0 Sexhow railway station was a railway station l... 1\n",
634
+ "1 In Finnish folklore, all places and things, an... 1\n",
635
+ "2 In mathematics, specifically differential calc... 1\n",
636
+ "3 is a Japanese shōjo manga series written and i... 1\n",
637
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... 1\n",
638
+ "... ... ...\n",
639
+ "149995 Randy Borum is a Professor and Coordinator of ... 1\n",
640
+ "149996 Sa'och (, also, \"Sauch\") is an endangered, nuc... 1\n",
641
+ "149997 Philip C. Hanawalt (born 1931) is an American ... 1\n",
642
+ "149998 Vossius Gymnasium is a public gymnasium in the... 1\n",
643
+ "149999 Simone Stratigo (, Symeon Filippos Stratigos; ... 1\n",
644
+ "\n",
645
+ "[150000 rows x 2 columns]"
646
+ ]
647
+ },
648
+ "execution_count": 6,
649
+ "metadata": {},
650
+ "output_type": "execute_result"
651
+ }
652
+ ],
653
+ "source": [
654
+ "new_df_ai= pd.DataFrame({\n",
655
+ " \"Paragraph\": df.generated_intro, \n",
656
+ " \"AI_generated\": pd.Series([1]*len(df))\n",
657
+ " \n",
658
+ " }\n",
659
+ ")\n",
660
+ "new_df_ai"
661
+ ]
662
+ },
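The two cells above build the human-labelled (0) and AI-labelled (1) frames by hand. An equivalent, more concise construction (a sketch; the column names match the cells above, and the resulting frames should be identical):

```python
# Select the source column, rename it to Paragraph, attach the class label.
new_df_human = df[["wiki_intro"]].rename(columns={"wiki_intro": "Paragraph"}).assign(AI_generated=0)
new_df_ai = df[["generated_intro"]].rename(columns={"generated_intro": "Paragraph"}).assign(AI_generated=1)
```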
663
+ {
664
+ "cell_type": "code",
665
+ "execution_count": 7,
666
+ "metadata": {},
667
+ "outputs": [
668
+ {
669
+ "data": {
670
+ "text/plain": [
671
+ "array([\"In Finnish folklore, all places and things, and also human beings, have a haltija (a genius, guardian spirit) of their own. One such haltija is called etiäinen—an image, doppelgänger, or just an impression that goes ahead of a person, doing things the person in question later does. For example, people waiting at home might hear the door close or even see a shadow or a silhouette, only to realize that no one has yet arrived. Etiäinen can also refer to some kind of a feeling that something is going to happen. Sometimes it could, for example, warn of a bad year coming. In modern Finnish, the term has detached from its shamanistic origins and refers to premonition. Unlike clairvoyance, divination, and similar practices, etiäiset (plural) are spontaneous and can't be induced. Quite the opposite, they may be unwanted and cause anxiety, like ghosts. Etiäiset need not be too dramatic and may concern everyday events, although ones related to e.g. deaths are common. As these phenomena are still reported today, they can be considered a living tradition, as a way to explain the psychological experience of premonition.\"],\n",
672
+ " dtype=object)"
673
+ ]
674
+ },
675
+ "execution_count": 7,
676
+ "metadata": {},
677
+ "output_type": "execute_result"
678
+ }
679
+ ],
680
+ "source": [
681
+ "new_df_human.iloc[[1]].Paragraph.values"
682
+ ]
683
+ },
684
+ {
685
+ "cell_type": "code",
686
+ "execution_count": 8,
687
+ "metadata": {},
688
+ "outputs": [
689
+ {
690
+ "data": {
691
+ "text/plain": [
692
+ "array(['In Finnish folklore, all places and things, animate or inanimate, have a spirit or \"etiäinen\" that lives there. Etiäinen can manifest in many forms, but is usually described as a kind, elderly woman with white hair. She is the guardian of natural places and often helps people in need. \\n\\nEtiäinen has been a part of Finnish culture for centuries and is still widely believed in today. Folklorists study etiäinen to understand Finnish traditions and how they have changed over time.'],\n",
693
+ " dtype=object)"
694
+ ]
695
+ },
696
+ "execution_count": 8,
697
+ "metadata": {},
698
+ "output_type": "execute_result"
699
+ }
700
+ ],
701
+ "source": [
702
+ "new_df_ai.iloc[[1]].Paragraph.values\n"
703
+ ]
704
+ },
705
+ {
706
+ "cell_type": "code",
707
+ "execution_count": 9,
708
+ "metadata": {},
709
+ "outputs": [
710
+ {
711
+ "data": {
712
+ "text/html": [
713
+ "<div>\n",
714
+ "<style scoped>\n",
715
+ " .dataframe tbody tr th:only-of-type {\n",
716
+ " vertical-align: middle;\n",
717
+ " }\n",
718
+ "\n",
719
+ " .dataframe tbody tr th {\n",
720
+ " vertical-align: top;\n",
721
+ " }\n",
722
+ "\n",
723
+ " .dataframe thead th {\n",
724
+ " text-align: right;\n",
725
+ " }\n",
726
+ "</style>\n",
727
+ "<table border=\"1\" class=\"dataframe\">\n",
728
+ " <thead>\n",
729
+ " <tr style=\"text-align: right;\">\n",
730
+ " <th></th>\n",
731
+ " <th>Paragraph</th>\n",
732
+ " <th>AI_generated</th>\n",
733
+ " </tr>\n",
734
+ " </thead>\n",
735
+ " <tbody>\n",
736
+ " <tr>\n",
737
+ " <th>0</th>\n",
738
+ " <td>Sexhow railway station was a railway station l...</td>\n",
739
+ " <td>1</td>\n",
740
+ " </tr>\n",
741
+ " <tr>\n",
742
+ " <th>1</th>\n",
743
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
744
+ " <td>1</td>\n",
745
+ " </tr>\n",
746
+ " <tr>\n",
747
+ " <th>2</th>\n",
748
+ " <td>In mathematics, specifically differential calc...</td>\n",
749
+ " <td>1</td>\n",
750
+ " </tr>\n",
751
+ " <tr>\n",
752
+ " <th>3</th>\n",
753
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
754
+ " <td>1</td>\n",
755
+ " </tr>\n",
756
+ " <tr>\n",
757
+ " <th>4</th>\n",
758
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
759
+ " <td>1</td>\n",
760
+ " </tr>\n",
761
+ " <tr>\n",
762
+ " <th>...</th>\n",
763
+ " <td>...</td>\n",
764
+ " <td>...</td>\n",
765
+ " </tr>\n",
766
+ " <tr>\n",
767
+ " <th>149995</th>\n",
768
+ " <td>Randy Borum is a Professor and Coordinator of ...</td>\n",
769
+ " <td>0</td>\n",
770
+ " </tr>\n",
771
+ " <tr>\n",
772
+ " <th>149996</th>\n",
773
+ " <td>Sa'och (, also, \"Sauch\") is an endangered, nea...</td>\n",
774
+ " <td>0</td>\n",
775
+ " </tr>\n",
776
+ " <tr>\n",
777
+ " <th>149997</th>\n",
778
+ " <td>Philip C. Hanawalt (born 1931) is an American ...</td>\n",
779
+ " <td>0</td>\n",
780
+ " </tr>\n",
781
+ " <tr>\n",
782
+ " <th>149998</th>\n",
783
+ " <td>Vossius Gymnasium is a public gymnasium in Ams...</td>\n",
784
+ " <td>0</td>\n",
785
+ " </tr>\n",
786
+ " <tr>\n",
787
+ " <th>149999</th>\n",
788
+ " <td>Simone Stratigo (, Symeon Filippos Stratigos; ...</td>\n",
789
+ " <td>0</td>\n",
790
+ " </tr>\n",
791
+ " </tbody>\n",
792
+ "</table>\n",
793
+ "<p>300000 rows × 2 columns</p>\n",
794
+ "</div>"
795
+ ],
796
+ "text/plain": [
797
+ " Paragraph AI_generated\n",
798
+ "0 Sexhow railway station was a railway station l... 1\n",
799
+ "1 In Finnish folklore, all places and things, an... 1\n",
800
+ "2 In mathematics, specifically differential calc... 1\n",
801
+ "3 is a Japanese shōjo manga series written and i... 1\n",
802
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... 1\n",
803
+ "... ... ...\n",
804
+ "149995 Randy Borum is a Professor and Coordinator of ... 0\n",
805
+ "149996 Sa'och (, also, \"Sauch\") is an endangered, nea... 0\n",
806
+ "149997 Philip C. Hanawalt (born 1931) is an American ... 0\n",
807
+ "149998 Vossius Gymnasium is a public gymnasium in Ams... 0\n",
808
+ "149999 Simone Stratigo (, Symeon Filippos Stratigos; ... 0\n",
809
+ "\n",
810
+ "[300000 rows x 2 columns]"
811
+ ]
812
+ },
813
+ "execution_count": 9,
814
+ "metadata": {},
815
+ "output_type": "execute_result"
816
+ }
817
+ ],
818
+ "source": [
819
+ "concat_df= pd.concat([new_df_ai, new_df_human])\n",
820
+ "concat_df"
821
+ ]
822
+ },
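Note that `pd.concat` keeps each frame's original `0..149999` index, which is why the output above ends at index 149999 despite having 300000 rows, and why the next two cells must use positional `iloc` lookups to reach the second half. A sketch of the alternative with a fresh index, plus a quick class-balance check (`ignore_index=True` is a suggestion, not what the cell above does):

```python
# Re-concatenate with a fresh 0..299999 index and confirm the 50/50 split.
concat_df = pd.concat([new_df_ai, new_df_human], ignore_index=True)
print(concat_df.AI_generated.value_counts())  # expect 150000 rows per class
```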
823
+ {
824
+ "cell_type": "code",
825
+ "execution_count": 10,
826
+ "metadata": {},
827
+ "outputs": [
828
+ {
829
+ "data": {
830
+ "text/html": [
831
+ "<div>\n",
832
+ "<style scoped>\n",
833
+ " .dataframe tbody tr th:only-of-type {\n",
834
+ " vertical-align: middle;\n",
835
+ " }\n",
836
+ "\n",
837
+ " .dataframe tbody tr th {\n",
838
+ " vertical-align: top;\n",
839
+ " }\n",
840
+ "\n",
841
+ " .dataframe thead th {\n",
842
+ " text-align: right;\n",
843
+ " }\n",
844
+ "</style>\n",
845
+ "<table border=\"1\" class=\"dataframe\">\n",
846
+ " <thead>\n",
847
+ " <tr style=\"text-align: right;\">\n",
848
+ " <th></th>\n",
849
+ " <th>Paragraph</th>\n",
850
+ " <th>AI_generated</th>\n",
851
+ " </tr>\n",
852
+ " </thead>\n",
853
+ " <tbody>\n",
854
+ " <tr>\n",
855
+ " <th>0</th>\n",
856
+ " <td>Sexhow railway station was a railway station l...</td>\n",
857
+ " <td>1</td>\n",
858
+ " </tr>\n",
859
+ " </tbody>\n",
860
+ "</table>\n",
861
+ "</div>"
862
+ ],
863
+ "text/plain": [
864
+ " Paragraph AI_generated\n",
865
+ "0 Sexhow railway station was a railway station l... 1"
866
+ ]
867
+ },
868
+ "execution_count": 10,
869
+ "metadata": {},
870
+ "output_type": "execute_result"
871
+ }
872
+ ],
873
+ "source": [
874
+ "concat_df.iloc[[0000]]\n"
875
+ ]
876
+ },
877
+ {
878
+ "cell_type": "code",
879
+ "execution_count": 11,
880
+ "metadata": {},
881
+ "outputs": [
882
+ {
883
+ "data": {
884
+ "text/html": [
885
+ "<div>\n",
886
+ "<style scoped>\n",
887
+ " .dataframe tbody tr th:only-of-type {\n",
888
+ " vertical-align: middle;\n",
889
+ " }\n",
890
+ "\n",
891
+ " .dataframe tbody tr th {\n",
892
+ " vertical-align: top;\n",
893
+ " }\n",
894
+ "\n",
895
+ " .dataframe thead th {\n",
896
+ " text-align: right;\n",
897
+ " }\n",
898
+ "</style>\n",
899
+ "<table border=\"1\" class=\"dataframe\">\n",
900
+ " <thead>\n",
901
+ " <tr style=\"text-align: right;\">\n",
902
+ " <th></th>\n",
903
+ " <th>Paragraph</th>\n",
904
+ " <th>AI_generated</th>\n",
905
+ " </tr>\n",
906
+ " </thead>\n",
907
+ " <tbody>\n",
908
+ " <tr>\n",
909
+ " <th>0</th>\n",
910
+ " <td>Sexhow railway station was a railway station b...</td>\n",
911
+ " <td>0</td>\n",
912
+ " </tr>\n",
913
+ " </tbody>\n",
914
+ "</table>\n",
915
+ "</div>"
916
+ ],
917
+ "text/plain": [
918
+ " Paragraph AI_generated\n",
919
+ "0 Sexhow railway station was a railway station b... 0"
920
+ ]
921
+ },
922
+ "execution_count": 11,
923
+ "metadata": {},
924
+ "output_type": "execute_result"
925
+ }
926
+ ],
927
+ "source": [
928
+ "concat_df.iloc[[150000]]"
929
+ ]
930
+ },
931
+ {
932
+ "cell_type": "code",
933
+ "execution_count": 12,
934
+ "metadata": {},
935
+ "outputs": [],
936
+ "source": [
937
+ "cdf_shffeled= concat_df.sample(frac=1).reset_index(drop=True)"
938
+ ]
939
+ },
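Here `sample(frac=1)` shuffles all rows and `reset_index(drop=True)` discards the duplicated source indices. As written, the shuffle is non-deterministic; passing a fixed `random_state` (an assumption, not something this notebook does) would make the exported CSV reproducible across runs:

```python
# Reproducible variant: the same row order on every run.
cdf_shuffled = concat_df.sample(frac=1, random_state=42).reset_index(drop=True)
```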
940
+ {
941
+ "cell_type": "code",
942
+ "execution_count": 13,
943
+ "metadata": {},
944
+ "outputs": [
945
+ {
946
+ "data": {
947
+ "text/html": [
948
+ "<div>\n",
949
+ "<style scoped>\n",
950
+ " .dataframe tbody tr th:only-of-type {\n",
951
+ " vertical-align: middle;\n",
952
+ " }\n",
953
+ "\n",
954
+ " .dataframe tbody tr th {\n",
955
+ " vertical-align: top;\n",
956
+ " }\n",
957
+ "\n",
958
+ " .dataframe thead th {\n",
959
+ " text-align: right;\n",
960
+ " }\n",
961
+ "</style>\n",
962
+ "<table border=\"1\" class=\"dataframe\">\n",
963
+ " <thead>\n",
964
+ " <tr style=\"text-align: right;\">\n",
965
+ " <th></th>\n",
966
+ " <th>Paragraph</th>\n",
967
+ " <th>AI_generated</th>\n",
968
+ " </tr>\n",
969
+ " </thead>\n",
970
+ " <tbody>\n",
971
+ " <tr>\n",
972
+ " <th>0</th>\n",
973
+ " <td>Sexhow railway station was a railway station l...</td>\n",
974
+ " <td>1</td>\n",
975
+ " </tr>\n",
976
+ " <tr>\n",
977
+ " <th>1</th>\n",
978
+ " <td>In Finnish folklore, all places and things, an...</td>\n",
979
+ " <td>1</td>\n",
980
+ " </tr>\n",
981
+ " <tr>\n",
982
+ " <th>2</th>\n",
983
+ " <td>In mathematics, specifically differential calc...</td>\n",
984
+ " <td>1</td>\n",
985
+ " </tr>\n",
986
+ " <tr>\n",
987
+ " <th>3</th>\n",
988
+ " <td>is a Japanese shōjo manga series written and i...</td>\n",
989
+ " <td>1</td>\n",
990
+ " </tr>\n",
991
+ " <tr>\n",
992
+ " <th>4</th>\n",
993
+ " <td>Robert Milner \"Rob\" Bradley, Jr. (born August ...</td>\n",
994
+ " <td>1</td>\n",
995
+ " </tr>\n",
996
+ " <tr>\n",
997
+ " <th>...</th>\n",
998
+ " <td>...</td>\n",
999
+ " <td>...</td>\n",
1000
+ " </tr>\n",
1001
+ " <tr>\n",
1002
+ " <th>149995</th>\n",
1003
+ " <td>Randy Borum is a Professor and Coordinator of ...</td>\n",
1004
+ " <td>0</td>\n",
1005
+ " </tr>\n",
1006
+ " <tr>\n",
1007
+ " <th>149996</th>\n",
1008
+ " <td>Sa'och (, also, \"Sauch\") is an endangered, nea...</td>\n",
1009
+ " <td>0</td>\n",
1010
+ " </tr>\n",
1011
+ " <tr>\n",
1012
+ " <th>149997</th>\n",
1013
+ " <td>Philip C. Hanawalt (born 1931) is an American ...</td>\n",
1014
+ " <td>0</td>\n",
1015
+ " </tr>\n",
1016
+ " <tr>\n",
1017
+ " <th>149998</th>\n",
1018
+ " <td>Vossius Gymnasium is a public gymnasium in Ams...</td>\n",
1019
+ " <td>0</td>\n",
1020
+ " </tr>\n",
1021
+ " <tr>\n",
1022
+ " <th>149999</th>\n",
1023
+ " <td>Simone Stratigo (, Symeon Filippos Stratigos; ...</td>\n",
1024
+ " <td>0</td>\n",
1025
+ " </tr>\n",
1026
+ " </tbody>\n",
1027
+ "</table>\n",
1028
+ "<p>300000 rows × 2 columns</p>\n",
1029
+ "</div>"
1030
+ ],
1031
+ "text/plain": [
1032
+ " Paragraph AI_generated\n",
1033
+ "0 Sexhow railway station was a railway station l... 1\n",
1034
+ "1 In Finnish folklore, all places and things, an... 1\n",
1035
+ "2 In mathematics, specifically differential calc... 1\n",
1036
+ "3 is a Japanese shōjo manga series written and i... 1\n",
1037
+ "4 Robert Milner \"Rob\" Bradley, Jr. (born August ... 1\n",
1038
+ "... ... ...\n",
1039
+ "149995 Randy Borum is a Professor and Coordinator of ... 0\n",
1040
+ "149996 Sa'och (, also, \"Sauch\") is an endangered, nea... 0\n",
1041
+ "149997 Philip C. Hanawalt (born 1931) is an American ... 0\n",
1042
+ "149998 Vossius Gymnasium is a public gymnasium in Ams... 0\n",
1043
+ "149999 Simone Stratigo (, Symeon Filippos Stratigos; ... 0\n",
1044
+ "\n",
1045
+ "[300000 rows x 2 columns]"
1046
+ ]
1047
+ },
1048
+ "execution_count": 13,
1049
+ "metadata": {},
1050
+ "output_type": "execute_result"
1051
+ }
1052
+ ],
1053
+ "source": [
1054
+ "concat_df"
1055
+ ]
1056
+ },
1057
+ {
1058
+ "cell_type": "code",
1059
+ "execution_count": 14,
1060
+ "metadata": {},
1061
+ "outputs": [
1062
+ {
1063
+ "data": {
1064
+ "text/html": [
1065
+ "<div>\n",
1066
+ "<style scoped>\n",
1067
+ " .dataframe tbody tr th:only-of-type {\n",
1068
+ " vertical-align: middle;\n",
1069
+ " }\n",
1070
+ "\n",
1071
+ " .dataframe tbody tr th {\n",
1072
+ " vertical-align: top;\n",
1073
+ " }\n",
1074
+ "\n",
1075
+ " .dataframe thead th {\n",
1076
+ " text-align: right;\n",
1077
+ " }\n",
1078
+ "</style>\n",
1079
+ "<table border=\"1\" class=\"dataframe\">\n",
1080
+ " <thead>\n",
1081
+ " <tr style=\"text-align: right;\">\n",
1082
+ " <th></th>\n",
1083
+ " <th>Paragraph</th>\n",
1084
+ " <th>AI_generated</th>\n",
1085
+ " </tr>\n",
1086
+ " </thead>\n",
1087
+ " <tbody>\n",
1088
+ " <tr>\n",
1089
+ " <th>0</th>\n",
1090
+ " <td>Bentivoglio is an Italian surname. Notable peo...</td>\n",
1091
+ " <td>0</td>\n",
1092
+ " </tr>\n",
1093
+ " <tr>\n",
1094
+ " <th>1</th>\n",
1095
+ " <td>Stephanie McCallum (born Sydney, Australia, 3...</td>\n",
1096
+ " <td>0</td>\n",
1097
+ " </tr>\n",
1098
+ " <tr>\n",
1099
+ " <th>2</th>\n",
1100
+ " <td>Abdāl lit: substitutes, but which can also be...</td>\n",
1101
+ " <td>1</td>\n",
1102
+ " </tr>\n",
1103
+ " <tr>\n",
1104
+ " <th>3</th>\n",
1105
+ " <td>The Gadget is a young adult historical novel w...</td>\n",
1106
+ " <td>1</td>\n",
1107
+ " </tr>\n",
1108
+ " <tr>\n",
1109
+ " <th>4</th>\n",
1110
+ " <td>The A-1 lifeboat was a powered lifeboat that w...</td>\n",
1111
+ " <td>0</td>\n",
1112
+ " </tr>\n",
1113
+ " <tr>\n",
1114
+ " <th>...</th>\n",
1115
+ " <td>...</td>\n",
1116
+ " <td>...</td>\n",
1117
+ " </tr>\n",
1118
+ " <tr>\n",
1119
+ " <th>299995</th>\n",
1120
+ " <td>James Wrighten (b. 1745 - d. 1793) was an Amer...</td>\n",
1121
+ " <td>1</td>\n",
1122
+ " </tr>\n",
1123
+ " <tr>\n",
1124
+ " <th>299996</th>\n",
1125
+ " <td>The U-matrix (unified distance matrix) is a sy...</td>\n",
1126
+ " <td>1</td>\n",
1127
+ " </tr>\n",
1128
+ " <tr>\n",
1129
+ " <th>299997</th>\n",
1130
+ " <td>Holding Trevor is a 2007 American gay-themed p...</td>\n",
1131
+ " <td>1</td>\n",
1132
+ " </tr>\n",
1133
+ " <tr>\n",
1134
+ " <th>299998</th>\n",
1135
+ " <td>Jarvisfield is a heritage-listed former pastor...</td>\n",
1136
+ " <td>0</td>\n",
1137
+ " </tr>\n",
1138
+ " <tr>\n",
1139
+ " <th>299999</th>\n",
1140
+ " <td>The Silver Guardian is a Chinese web series c...</td>\n",
1141
+ " <td>1</td>\n",
1142
+ " </tr>\n",
1143
+ " </tbody>\n",
1144
+ "</table>\n",
1145
+ "<p>300000 rows × 2 columns</p>\n",
1146
+ "</div>"
1147
+ ],
1148
+ "text/plain": [
1149
+ " Paragraph AI_generated\n",
1150
+ "0 Bentivoglio is an Italian surname. Notable peo... 0\n",
1151
+ "1 Stephanie McCallum (born Sydney, Australia, 3... 0\n",
1152
+ "2 Abdāl lit: substitutes, but which can also be... 1\n",
1153
+ "3 The Gadget is a young adult historical novel w... 1\n",
1154
+ "4 The A-1 lifeboat was a powered lifeboat that w... 0\n",
1155
+ "... ... ...\n",
1156
+ "299995 James Wrighten (b. 1745 - d. 1793) was an Amer... 1\n",
1157
+ "299996 The U-matrix (unified distance matrix) is a sy... 1\n",
1158
+ "299997 Holding Trevor is a 2007 American gay-themed p... 1\n",
1159
+ "299998 Jarvisfield is a heritage-listed former pastor... 0\n",
1160
+ "299999 The Silver Guardian is a Chinese web series c... 1\n",
1161
+ "\n",
1162
+ "[300000 rows x 2 columns]"
1163
+ ]
1164
+ },
1165
+ "execution_count": 14,
1166
+ "metadata": {},
1167
+ "output_type": "execute_result"
1168
+ }
1169
+ ],
1170
+ "source": [
1171
+ "cdf_shffeled"
1172
+ ]
1173
+ },
1174
+ {
1175
+ "cell_type": "code",
1176
+ "execution_count": 15,
1177
+ "metadata": {},
1178
+ "outputs": [],
1179
+ "source": [
1180
+ "cdf_shffeled.to_csv(\"data/AI_checker_remade.csv\", index=False)"
1181
+ ]
1182
+ },
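A hedged sketch of the natural next step: reload the exported CSV and take a stratified train/test split for the classifier trained elsewhere in this repo. `train_test_split` is scikit-learn's; the `test_size=0.2` fraction and `random_state=42` are assumptions, not values taken from this notebook.

```python
# Sketch: reload the exported dataset and split it, preserving class balance.
import pandas as pd
from sklearn.model_selection import train_test_split

data = pd.read_csv("data/AI_checker_remade.csv")
train_df, test_df = train_test_split(
    data,
    test_size=0.2,               # assumed fraction; not taken from this notebook
    stratify=data.AI_generated,  # keep the 50/50 class balance in both splits
    random_state=42,
)
print(len(train_df), len(test_df))  # 240000 60000
```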
1183
+ {
1184
+ "cell_type": "code",
1185
+ "execution_count": null,
1186
+ "metadata": {},
1187
+ "outputs": [],
1188
+ "source": []
1189
+ },
1190
+ {
1191
+ "cell_type": "code",
1192
+ "execution_count": null,
1193
+ "metadata": {},
1194
+ "outputs": [],
1195
+ "source": []
1196
+ }
1197
+ ],
1198
+ "metadata": {
1199
+ "kernelspec": {
1200
+ "display_name": "venv",
1201
+ "language": "python",
1202
+ "name": "python3"
1203
+ },
1204
+ "language_info": {
1205
+ "codemirror_mode": {
1206
+ "name": "ipython",
1207
+ "version": 3
1208
+ },
1209
+ "file_extension": ".py",
1210
+ "mimetype": "text/x-python",
1211
+ "name": "python",
1212
+ "nbconvert_exporter": "python",
1213
+ "pygments_lexer": "ipython3",
1214
+ "version": "3.10.12"
1215
+ },
1216
+ "orig_nbformat": 4
1217
+ },
1218
+ "nbformat": 4,
1219
+ "nbformat_minor": 2
1220
+ }