---
license: apache-2.0
language:
  - en
pipeline_tag: text-generation
tags:
  - chat
---

# Qwen2-1.5B-Finetuned

## Training details
Fine-tuned on the following datasets, split 9:1 into train/eval:
- alpaca-gpt4_cleaned-qwen2-train.jsonl
- alpaca-gpt4_cleaned-qwen2-val.jsonl
- xlam-dataset-60k-qwen2-train.jsonl
- xlam-dataset-60k-qwen2-val.jsonl
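
A minimal sketch for inspecting one of these splits; note the record schema inside the .jsonl files is assumed here, not documented in this card:

```python
import json

def load_jsonl(path: str) -> list[dict]:
    # Each line is assumed to be one JSON record; the exact schema is not documented here.
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f]

train = load_jsonl('alpaca-gpt4_cleaned-qwen2-train.jsonl')
val = load_jsonl('alpaca-gpt4_cleaned-qwen2-val.jsonl')
print(len(train), len(val))  # expect roughly a 9:1 train/eval ratio
```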

## Quickstart

### utils for user content
- Replace [TRIPLE_BACKTICK] with ``` before use.

```python
import json

# System prompt used during fine-tuning for the function-calling task.
xlam_system = (
    "You are an AI assistant for function calling. "
    "For politically sensitive questions, security and privacy issues, "
    "and other non-computer science questions, you will refuse to answer"
)

def to_xlam_tools(tools: list | dict) -> list:
    """Convert OpenAI-style tool schemas to the flattened xLAM tool format."""
    if not isinstance(tools, list):
        tools = [tools]
    xlam_tools = []
    for tool in tools:
        assert isinstance(tool, dict)
        xlam_tools.append({
            "name": tool["name"],
            "description": tool["description"],
            "parameters": dict(tool["parameters"].get("properties", {})),
        })
    return xlam_tools

TASK_INSTRUCTION = '''You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, fill the parameters as None.'''

FORMAT_INSTRUCTION = '''The output MUST strictly adhere to the following JSON format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make tool_calls an empty list '[]'.
[TRIPLE_BACKTICK]
{ "tool_calls": [
    {"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
    ... (more tool calls as required)
] }
[TRIPLE_BACKTICK]
'''

def get_prompt(xlam_tools: list | dict | str, query: str) -> str:
    """Assemble the full user prompt: task instruction, available tools, output format, and query."""
    if not isinstance(xlam_tools, str):
        xlam_tools = json.dumps(xlam_tools)
    prompt = f"<instruction>\n{TASK_INSTRUCTION}\n</instruction>\n\n"
    prompt += f"<available tools>\n{xlam_tools}\n</available tools>\n\n"
    prompt += f"<tool format>\n{FORMAT_INSTRUCTION}\n</tool format>\n\n"
    prompt += f"<query>\n{query.strip()}\n</query>\n\n"
    return prompt

```
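
A quick usage sketch of how these helpers fit together; the `get_current_weather` schema below is invented for illustration and is not part of the training data:

```python
# Hypothetical OpenAI-style tool schema, used only to illustrate the conversion.
weather_tool = {
    "name": "get_current_weather",
    "description": "Get the current weather for a given city.",
    "parameters": {
        "type": "object",
        "properties": {
            "city": {"description": "The city name.", "type": "str", "default": "Seoul"},
        },
    },
}

tools = to_xlam_tools(weather_tool)
prompt = get_prompt(tools, "What's the weather like in Seoul right now?")
print(prompt)  # <instruction>, <available tools>, <tool format>, and <query> sections
```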

### inference
- Replace [TRIPLE_BACKTICK] with ``` before use.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
user_msg = '''<instruction>
You are an expert in composing functions. You are given a question and a set of possible functions.
Based on the question, you will need to make one or more function/tool calls to achieve the purpose.
If none of the functions can be used, point it out and refuse to answer.
If the given question lacks the parameters required by the function, fill the parameters as None.
</instruction>

<available tools>
[{"name": "messages_from_telegram_channel", "description": "Fetches the last 10 messages or a specific message from a given public Telegram channel.", "parameters": {"channel": {"description": "The @username of the public Telegram channel.", "type": "str", "default": "telegram"}, "idmessage": {"description": "The ID of a specific message to retrieve. If not provided, the function will return the last 10 messages.", "type": "str, optional", "default": ""}}}, {"name": "shopify", "description": "Checks the availability of a given username on Shopify using the Toolbench RapidAPI.", "parameters": {"username": {"description": "The username to check for availability on Shopify.", "type": "str", "default": "username"}}}, {"name": "generate_a_face", "description": "Generates a face image using an AI service and returns the result as a JSON object or text. It utilizes the Toolbench RapidAPI service.", "parameters": {"ai": {"description": "The AI model identifier to be used for face generation.", "type": "str", "default": "1"}}}]
</available tools>

<tool format>
The output MUST strictly adhere to the following JSON format, and NO other text MUST be included.
The example format is as follows. Please make sure the parameter type is correct. If no function call is needed, please make tool_calls an empty list '[]'.
[TRIPLE_BACKTICK]
{ "tool_calls": [
    {"name": "func_name1", "arguments": {"argument1": "value1", "argument2": "value2"}},
    ... (more tool calls as required)
] }
[TRIPLE_BACKTICK]
</tool format>

<query>
Check if the username 'ShopMaster123' is available on Shopify.
</query>'''

messages = [dict(role='user', content=user_msg)]
label = { "tool_calls": [{"name": "shopify", "arguments": {"username": "ShopMaster123"}}] }


tokenizer = AutoTokenizer.from_pretrained(
    "objects76/qwen2-xlam", trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(
    "objects76/qwen2-xlam", trust_remote_code=True,
    torch_dtype="auto",
    device_map="cuda",
)
model.config.use_cache = True
model.eval()

# Tokenize with the chat template and move to the model's device.
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    max_length=tokenizer.model_max_length,
    padding=False, truncation=True,
    return_tensors='pt',
).to(model.device)

# Greedy decoding; uncomment the sampling arguments for near-deterministic sampling.
outputs = model.generate(
    input_ids=input_ids,  # attention_mask=attention_mask,
    max_new_tokens=1024,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
    # do_sample=True, temperature=0.01, top_p=0.01,
    use_cache=True,
)

# Decode only the newly generated tokens (skip the prompt).
response = tokenizer.decode(outputs[0, input_ids.shape[-1]:], skip_special_tokens=True)
print('response=', response)
print('label=', label)
```
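
The model is trained to wrap its answer in a fenced JSON object, so a small parsing step helps before comparing against `label`. The `parse_tool_calls` helper below is a hypothetical sketch, not part of this repo:

```python
import json
import re

def parse_tool_calls(text: str):
    """Pull the outermost {...} JSON object out of the (possibly fenced) model output."""
    match = re.search(r"\{.*\}", text, flags=re.DOTALL)
    if match is None:
        return None
    try:
        return json.loads(match.group(0))
    except json.JSONDecodeError:
        return None

parsed = parse_tool_calls(response)
print(parsed == label)  # True when the model reproduces the expected tool call
```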


### general instruction-following (few-shot)
```python

# System prompt describing the extraction task.
system = ('Your task is to extract specific information from the given text.'
          ' Please provide the requested information in the format shown in the examples below.'
          )

fewshot_example = '''\
Example 1:
Text: John Smith is a 35-year-old software engineer from New York. He has been working at TechCorp for 5 years.
Name: John Smith
Age: 35
Occupation: Software Engineer
Location: New York
Company: TechCorp
Years of Experience: 5

Example 2:
Text: Sarah Johnson, a 28-year-old marketing specialist, recently moved to San Francisco to join StartupX as their new Head of Marketing.
Name: Sarah Johnson
Age: 28
Occupation: Marketing Specialist
Location: San Francisco
Company: StartupX
Position: Head of Marketing

Now, extract the information from the following text:
Text: Michael Brown, 42, is a senior data scientist at DataInc in Chicago. He has been in the field for over a decade and specializes in machine learning algorithms.
'''

# Reference answer (from GPT) used as the ground truth for comparison.
answer_from_gpt = '''\
Name: Michael Brown
Age: 42
Occupation: Senior Data Scientist
Location: Chicago
Company: DataInc
Years of Experience: Over a decade
Specialization: Machine Learning Algorithms
'''

messages = [
    {"role": "system", "content": system.strip()},
    {"role": "user", "content": fewshot_example.strip()},
]
```
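
The snippet above only builds `messages`; to actually generate, the `tokenizer` and `model` loaded in the inference section can be reused. A minimal sketch:

```python
# Reuses `tokenizer` and `model` from the inference section above.
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors='pt',
).to(model.device)

outputs = model.generate(
    input_ids=input_ids,
    max_new_tokens=256,
    eos_token_id=tokenizer.eos_token_id,
    pad_token_id=tokenizer.pad_token_id,
)

response = tokenizer.decode(outputs[0, input_ids.shape[-1]:], skip_special_tokens=True)
print('response=', response)          # model's extraction
print('reference=', answer_from_gpt)  # GPT reference answer for comparison
```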