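# Triton model configuration for the "preprocessing" model: a Python-backend
# tokenization step that converts a text prompt into token IDs for a
# downstream generation model (typically the first stage of an ensemble).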
name: "preprocessing"
backend: "python"
max_batch_size: 1

# Request tensors: the raw text prompt, optional bad-word / stop-word lists,
# and the requested number of tokens to generate.
input [
    {
        name: "QUERY"
        data_type: TYPE_STRING
        dims: [ -1 ]
    },
    {
        name: "BAD_WORDS_DICT"
        data_type: TYPE_STRING
        dims: [ -1 ]
        optional: true
    },
    {
        name: "STOP_WORDS_DICT"
        data_type: TYPE_STRING
        dims: [ -1 ]
        optional: true
    },
    {
        name: "REQUEST_OUTPUT_LEN"
        data_type: TYPE_UINT32
        dims: [ -1 ]
    }
]
# Response tensors: the tokenized prompt and its length, the encoded
# bad-/stop-word lists, and values forwarded to the downstream generation step.
output [
    {
        name: "INPUT_ID"
        data_type: TYPE_UINT32
        dims: [ -1 ]
    },
    {
        name: "REQUEST_INPUT_LEN"
        data_type: TYPE_UINT32
        dims: [ 1 ]
    },
    {
        name: "BAD_WORDS_IDS"
        data_type: TYPE_INT32
        dims: [ 2, -1 ]
    },
    {
        name: "STOP_WORDS_IDS"
        data_type: TYPE_INT32
        dims: [ 2, -1 ]
    },
    {
        name: "REQUEST_OUTPUT_LEN"
        data_type: TYPE_UINT32
        dims: [ -1 ]
    },
    {
        name: "PROMPT_LEARNING_TASK_NAME_IDS"
        data_type: TYPE_UINT32
        dims: [ 1 ]
    }
]

# Four CPU instances of the tokenizer so several requests can be
# preprocessed in parallel.
instance_group [
    {
        count: 4
        kind: KIND_CPU
    }
]

# Tokenizer model file loaded by the backend's model.py.
parameters {
  key: "tokenizer_path"
  value: {
    string_value: "tokenizer/tokenizer.model"
  }
}
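
For orientation, below is a minimal sketch of the `model.py` that a config like this could pair with under the Triton Python backend. The tensor names, dtypes, batch dimension, and the `tokenizer_path` parameter are taken from the config above; the SentencePiece-based tokenization, the empty bad-/stop-word placeholders, and the omission of `PROMPT_LEARNING_TASK_NAME_IDS` are illustrative assumptions, not the repository's actual preprocessing code.

```python
# Hypothetical sketch, not the real model.py: shows how the tensors declared
# in config.pbtxt above could be produced by a Triton Python-backend model.
import json

import numpy as np
import triton_python_backend_utils as pb_utils
from sentencepiece import SentencePieceProcessor


class TritonPythonModel:
    def initialize(self, args):
        config = json.loads(args["model_config"])
        # "tokenizer_path" comes from the parameters block of config.pbtxt.
        path = config["parameters"]["tokenizer_path"]["string_value"]
        self.tokenizer = SentencePieceProcessor(model_file=path)

    def execute(self, requests):
        responses = []
        for request in requests:
            # QUERY is TYPE_STRING, delivered as an array of UTF-8 byte strings.
            query = pb_utils.get_input_tensor_by_name(request, "QUERY").as_numpy()
            out_len = pb_utils.get_input_tensor_by_name(
                request, "REQUEST_OUTPUT_LEN").as_numpy()

            ids = self.tokenizer.encode(query.flatten()[0].decode("utf-8"))

            # Output shapes include the batch dimension implied by max_batch_size.
            tensors = [
                pb_utils.Tensor("INPUT_ID", np.array([ids], dtype=np.uint32)),
                pb_utils.Tensor("REQUEST_INPUT_LEN",
                                np.array([[len(ids)]], dtype=np.uint32)),
                # Placeholder empty [2, 0] lists; a full implementation would
                # encode the optional BAD_WORDS_DICT / STOP_WORDS_DICT inputs.
                pb_utils.Tensor("BAD_WORDS_IDS",
                                np.zeros((1, 2, 0), dtype=np.int32)),
                pb_utils.Tensor("STOP_WORDS_IDS",
                                np.zeros((1, 2, 0), dtype=np.int32)),
                pb_utils.Tensor("REQUEST_OUTPUT_LEN",
                                out_len.astype(np.uint32)),
            ]
            responses.append(pb_utils.InferenceResponse(output_tensors=tensors))
        return responses
```

On the client side, `TYPE_STRING` tensors are sent with the `BYTES` datatype (for example via tritonclient's `InferInput`), and the inputs marked `optional: true` can simply be left out of the request.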