# atomic2020-origin / make_origin_dataset.py
import os
from collections import Counter
from typing import List

import datasets
import matplotlib.pyplot as plt
import pandas as pd

from constants import (event_centered_2_descriptions,
                       physical_entity_2_descriptions, relations_map,
                       social_intercation_2_descriptions)
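
# `constants` (not shown here) is assumed, from how it is used below, to provide:
#   - relations_map: dict mapping a knowledge type key ('social_intercation',
#     'physical_entity', 'event_centered') to the list of relation names it covers
#   - event_centered_2_descriptions / physical_entity_2_descriptions /
#     social_intercation_2_descriptions: dicts mapping each relation name to a
#     natural-language description of that relation
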
def show_bar(relation: List):
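    """Plot a bar chart of relation frequencies, annotating each bar with its count."""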
    c = dict(Counter(relation))
    keys = list(c.keys())
    values = list(c.values())
    plt.bar(keys, values)
    plt.xticks(rotation=25, fontsize=8)
    plt.yticks(fontsize=8)
    plt.xlabel('relations')
    plt.ylabel('count')
    plt.title('relation distribution')
    for i in range(len(keys)):
        plt.text(i, values[i] + 10, str(values[i]), ha='center', fontsize=10)
    plt.show()

def read_file(data_path: str):
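    """Read a tab-separated ATOMIC file and return the event, relation, and tail columns as lists."""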
    df = pd.read_csv(data_path, sep='\t', header=None)
    df.columns = ['event', 'relation', 'tail']
    print(df.head())
    event = df['event'].tolist()
    relation = df['relation'].tolist()
    tail = df['tail'].tolist()
    return event, relation, tail

def make_base_dataset(event: List[str], relation: List[str], tail: List[str]):
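    """Merge consecutive rows that share the same (event, relation) pair into one row whose
    tail is a list, and attach the knowledge type and relation description for each row."""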
    new_event, new_relation, new_tail = [], [], []
    knowledge_type = []
    relation_description = []
    # flat list of every known relation name, used to validate the input
    relation_list = []
    for r in relations_map.values():
        relation_list.extend(r)
    prev_event, prev_relation = None, None
    for i in range(len(event)):
        if i > 0 and event[i] == prev_event and relation[i] == prev_relation:
            # same (event, relation) as the previous row: just collect the tail
            new_tail[-1].append(tail[i])
        else:
            new_event.append(event[i])
            new_relation.append(relation[i])
            # insert knowledge type and relation description
            if relation[i] not in relation_list:
                raise ValueError(f'no matching knowledge type found for relation {relation[i]}, please check it!')
            for k, v in relations_map.items():
                if relation[i] in v:
                    knowledge_type.append(k)
                    if k == 'social_intercation':
                        relation_description.append(social_intercation_2_descriptions[relation[i]])
                    elif k == 'physical_entity':
                        relation_description.append(physical_entity_2_descriptions[relation[i]])
                    elif k == 'event_centered':
                        relation_description.append(event_centered_2_descriptions[relation[i]])
                    else:
                        raise KeyError(f'no description dict found for relation type {k}, please check it!')
            new_tail.append([tail[i]])
        prev_event, prev_relation = event[i], relation[i]
    df = pd.DataFrame({
        'knowledge_type': knowledge_type,
        'event': new_event,
        'relation': new_relation,
        'relation_description': relation_description,
        'tail': new_tail,
    })
    print(df.head())
    return df

def get_dataset(data_path: str):
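    """Load one TSV split and convert it into a `datasets.Dataset`."""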
    event, relation, tail = read_file(data_path=data_path)
    df = make_base_dataset(event=event, relation=relation, tail=tail)
    dataset = datasets.Dataset.from_pandas(df, split='train')
    print(dataset)
    return dataset

def upload_dataset(dataset, repo_id: str, access_token: str, private: bool):
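    """Push the dataset to the Hugging Face Hub repository `repo_id`."""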
    dataset.push_to_hub(
        repo_id=repo_id,
        private=private,
        token=access_token,
    )

if __name__ == '__main__':
    train_dataset = get_dataset('./dataset/train.tsv')
    valid_dataset = get_dataset('./dataset/dev.tsv')
    test_dataset = get_dataset('./dataset/test.tsv')
    dataset = datasets.DatasetDict({
        'train': train_dataset,
        'validation': valid_dataset,
        'test': test_dataset,
    })
    print(dataset)
    # Read the access token from an environment variable (HF_TOKEN here) instead of
    # hardcoding it in the script.
    upload_dataset(dataset, repo_id='Estwld/atomic2020-origin', private=False,
                   access_token=os.environ.get('HF_TOKEN'))