SicariusSicariiStuff committed on
Commit
d63014d
1 Parent(s): 86230b3

Upload How_to_Run.py

Files changed (1)
  1. How_to_Run.py +68 -0
How_to_Run.py ADDED
@@ -0,0 +1,68 @@
+ import transformers
+ import torch
+
+ # Model and tokenizer initialization
+ model_path_name = "SicariusSicariiStuff/LLAMA-3_8B_Unaligned_BETA"  # Replace with your model path
+
+ # Initialize the pipeline
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model_path_name,
+     model_kwargs={"torch_dtype": torch.bfloat16},
+     device_map="auto",  # Adjust to 'cuda' if needed
+ )
+
+ # Prepare the message list
+ message_list = [
+     [
+         {'role': 'system', 'content': "You are an AI assistant."},
+         {'role': 'user', 'content': "Who are you?"}
+     ]
+ ]
+
+ # Apply the chat template or manually format the prompts
+ try:
+     prompts = [
+         pipeline.tokenizer.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True,
+         )
+         for messages in message_list
+     ]
+ except AttributeError:
+     # Fallback: Manually format the prompts if `apply_chat_template` is unsupported
+     prompts = [
+         f"<|im_start|>system\n{msg[0]['content']}<|im_end|>\n"
+         f"<|im_start|>user\n{msg[1]['content']}<|im_end|>\n<|im_start|>assistant\n"
+         for msg in message_list
+     ]
+
+ # Debugging: Print prompts
+ print("Formatted Prompts:", prompts)
+
+ # Validate tokenizer and model's EOS and PAD token IDs
+ eos_token_id = pipeline.tokenizer.eos_token_id or 50256  # Default fallback for GPT-like models
+ pad_token_id = pipeline.tokenizer.pad_token_id = eos_token_id  # Reuse EOS for padding (Llama tokenizers ship without a PAD token)
+ print("EOS Token ID:", eos_token_id)
+
+ # Tokenize the prompts (optional debugging step)
+ tokens = pipeline.tokenizer(prompts, padding=True, return_tensors="pt")
+ print("Tokenized Input:", tokens)
+
+ # Generate the output
+ try:
+     outputs = pipeline(
+         prompts,
+         max_new_tokens=100,  # Reduce for debugging purposes
+         do_sample=True,
+         temperature=0.5,
+         top_p=0.5,
+         eos_token_id=eos_token_id,
+         pad_token_id=pad_token_id,
+     )
+     print("Outputs:", outputs)
+ except Exception as e:
+     print("Error during generation:", str(e))
+
+
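A minimal follow-up sketch for reading the completions back out of `outputs`, assuming the standard `text-generation` pipeline return shape (one list of `{"generated_text": ...}` dicts per input prompt, with the prompt echoed at the start of each generation):

# Sketch: strip the echoed prompt from each generation and print only the completion.
# Assumes outputs[i] is a list of {"generated_text": ...} dicts for prompts[i].
for prompt, generations in zip(prompts, outputs):
    completion = generations[0]["generated_text"][len(prompt):]
    print(completion.strip())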