# Qwen2-1.5B-Finetuned(0812)
## Training details
Datasets (9/1 train/eval split):
- alpaca-gpt4_cleaned-qwen2-train.jsonl
- alpaca-gpt4_cleaned-qwen2-val.jsonl
- xlam-dataset-60k-qwen2-train.jsonl
- xlam-dataset-60k-qwen2-val.jsonl
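A minimal loading sketch for these files, assuming each `.jsonl` line is a single JSON record readable by the 🤗 `datasets` JSON loader (the exact field layout of the records is not shown in this card and is an assumption to verify before training):

```python
# Sketch: load the train/val JSONL files listed above (paths assumed local).
from datasets import load_dataset

data_files = {
    "train": [
        "alpaca-gpt4_cleaned-qwen2-train.jsonl",
        "xlam-dataset-60k-qwen2-train.jsonl",
    ],
    "validation": [
        "alpaca-gpt4_cleaned-qwen2-val.jsonl",
        "xlam-dataset-60k-qwen2-val.jsonl",
    ],
}
dataset = load_dataset("json", data_files=data_files)
print(dataset)  # inspect the column layout before wiring it into a trainer
```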
## Quickstart
### Utilities for building the user content
````python
xlam_system = (
    "You are an AI assistant for function calling. "
    "For politically sensitive questions, security and privacy issues, "
    "and other non-computer science questions, you will refuse to answer"
)

def to_xlam_tools(tools: list | dict) -> list[dict]:
    """Convert OpenAI-style tool definitions into the flat xLAM tool format."""
    if not isinstance(tools, list):
        tools = [tools]
    xlam_tools = []
    for tool in tools:
        assert isinstance(tool, dict)
        xlam_tools.append({
            "name": tool["name"],
            "description": tool["description"],
            # Keep only the per-parameter schema under "properties".
            "parameters": {k: v for k, v in tool["parameters"].get("properties", {}).items()},
        })
    return xlam_tools

TASK_INSTRUCTION = '''You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, fill the parameters as None.'''
FORMAT_INSTRUCTION = '''The output MUST strictly adhere to the following JSON format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make tool_calls an empty list '[]'.
```
{ "tool_calls": [
{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
... (more tool calls as required)
] }
```
'''
````
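The helpers above can be combined into the tagged prompt layout used in the inference example below. `build_user_msg` is a hypothetical helper added here for illustration; only `to_xlam_tools`, `TASK_INSTRUCTION`, and `FORMAT_INSTRUCTION` come from the snippet above, and the `<instruction>` / `<available tools>` / `<tool format>` / `<query>` tags simply mirror the hand-written example in the next section.

```python
import json

def build_user_msg(query: str, tools: list[dict]) -> str:
    # Hypothetical helper: assembles the tagged prompt shown in the
    # inference example from the utilities defined above.
    return (
        f"<instruction>\n{TASK_INSTRUCTION}\n</instruction>\n"
        f"<available tools>\n{json.dumps(to_xlam_tools(tools))}\n</available tools>\n"
        f"<tool format>\n{FORMAT_INSTRUCTION}\n</tool format>\n"
        f"<query>\n{query}\n</query>"
    )
```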
### Inference
````python
user_msg = '''<instruction>
You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, fill the parameters as None.
</instruction>
<available tools>
[{"name": "messages_from_telegram_channel", "description": "Fetches the last 10 messages or a specific message from a given public Telegram channel.", "parameters": {"channel": {"description": "The @username of the public Telegram channel.", "type": "str", "default": "telegram"}, "idmessage": {"description": "The ID of a specific message to retrieve. If not provided, the function will return the last 10 messages.", "type": "str, optional", "default": ""}}}, {"name": "shopify", "description": "Checks the availability of a given username on Shopify using the Toolbench RapidAPI.", "parameters": {"username": {"description": "The username to check for availability on Shopify.", "type": "str", "default": "username"}}}, {"name": "generate_a_face", "description": "Generates a face image using an AI service and returns the result as a JSON object or text. It utilizes the Toolbench RapidAPI service.", "parameters": {"ai": {"description": "The AI model identifier to be used for face generation.", "type": "str", "default": "1"}}}]
</available tools>
<tool format>
The output MUST strictly adhere to the following JSON format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make tool_calls an empty list '[]'.
```
{ "tool_calls": [
{"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
... (more tool calls as required)
] }
```
</tool format>
<query>
Check if the username 'ShopMaster123' is available on Shopify.
</query>'''
messages = [dict(role='user', content=user_msg)]
label = { "tool_calls": [{"name": "shopify", "arguments": {"username": "ShopMaster123"}}] }
````
```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "objects76/qwen2-xlam", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "objects76/qwen2-xlam", trust_remote_code=True,
    torch_dtype="auto",
    device_map="cuda",
)
model.config.use_cache = True
model.eval()

# Apply the chat template and tokenize the prompt.
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    max_length=tokenizer.model_max_length,
    padding=False, truncation=True,
    return_tensors='pt',
).to(model.device)

# Greedy decoding; sampling parameters are left commented out.
outputs = model.generate(
    input_ids=input_ids,  # attention_mask=attention_mask,
    max_new_tokens=1024,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
    # do_sample=True, temperature=0.01, top_p=0.01,
    use_cache=True)

# Decode only the newly generated tokens (the prompt is sliced off).
response = tokenizer.decode(outputs[0, input_ids.shape[-1]:], skip_special_tokens=True)
print('response=', response)
print('label=', label)
```
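Since the model is trained to emit the JSON tool-call format described above, the response can usually be parsed directly. The guard below is a sketch and assumes the output is well-formed JSON; generation may occasionally produce text that is not.

```python
import json

try:
    tool_calls = json.loads(response).get("tool_calls", [])
except json.JSONDecodeError:
    tool_calls = []  # non-JSON output; treat as a refusal or fall back as needed
print(tool_calls)
# Expected for this query: [{'name': 'shopify', 'arguments': {'username': 'ShopMaster123'}}]
```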