Update README.md
Browse files
README.md
CHANGED
@@ -64,6 +64,40 @@ test: Dataset({
```

## Example

Here is an example of how to use it:

```python
def pre_process(sample, processor, max_length=512, ignore_id=-100):
    # create tensor from image
    try:
        image = sample["image"].convert('RGB')
        pixel_values = processor(
            image, return_tensors="pt").pixel_values.squeeze()
    except Exception as e:
        print(sample)
        print(f"Error: {e}")
        return {}

    # tokenize document
    input_ids = processor.tokenizer(
        sample["text"],
        add_special_tokens=False,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )["input_ids"].squeeze(0)

    labels = input_ids.clone()
    # model doesn't need to predict pad token
    labels[labels == processor.tokenizer.pad_token_id] = ignore_id
    return {"pixel_values": pixel_values, "labels": labels, "target_sequence": sample["text"]}

dataset = load_dataset("achang/plot_qa", streaming=True)
processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
processed_dataset = dataset.map(partial(pre_process, processor=processor, max_length=512, ignore_id=-100))
```

## Misc