naveed-stockmark committed on
Commit eb30e6c
1 Parent(s): 1d202ad

Upload 3 files

Files changed (4)
  1. .gitattributes +1 -0
  2. app.py +121 -0
  3. linking_df_technical_min.csv +3 -0
  4. utils.py +50 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ linking_df_technical_min.csv filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,121 @@
+ import pandas as pd
+ from utils import normalize_text
+ import streamlit as st
+
+ # load wikipedia data
+ wiki_df = pd.read_csv("../knowledge_platform/wiki_output/kensho_en_wiki_typing_technical.csv")
+
+ # keep only technical articles: collect pages flagged for exclusion or not marked technical
+ exclude_ids = set(wiki_df[(wiki_df.exclude == True) | (wiki_df.technical == False)].page_id.to_list())
+ include_skpes = set(wiki_df[wiki_df.page_id.apply(lambda x: x not in exclude_ids)].skpe_id.to_list())
+
+ wiki_df = wiki_df.drop(columns=['Unnamed: 0', 'en_probs', 'exclude'])
+ wiki_df = wiki_df.rename(columns={'title_x': 'en_title'})
+
+ # load kg data
+
+ # Wikidata edges
+ wikidata_df = pd.read_csv("../knowledge_platform/kg_data/wikidata_ss_processed.csv")
+
+ # keep only edges whose endpoints are both technical articles
+ wikidata_df = wikidata_df[wikidata_df.apply(lambda x: x.source_skpe in include_skpes and x.target_skpe in include_skpes, axis=1)]
+
+ # edges inferred from Wikipedia text (REBEL model output)
+ rebel_infer_df = pd.read_csv("../knowledge_platform/kg_data/rebel_inference_processed_ss.csv")
+
+ # keep rows where both endpoints were linked to an entity (skpe id present, not NaN)
+ rebel_infer_df = rebel_infer_df[rebel_infer_df.apply(lambda x: type(x.source_skpe_id) == str and type(x.target_skpe_id) == str, axis=1)]
+
+ rebel_infer_df = rebel_infer_df.drop(columns=['instance_id', 'source_text', 'target_text'])
+ rebel_infer_df = rebel_infer_df.rename(columns={'source_skpe_id': 'source_skpe', 'target_skpe_id': 'target_skpe', 'source': 'source_en', 'target': 'target_en'})
+
+ # tag each edge with its provenance
+ wikidata_df['source'] = 'wikidata'
+ rebel_infer_df['source'] = 'rebel_wikipedia'
+
+ # drop self-loops from the inferred edges
+ rebel_infer_df = rebel_infer_df[rebel_infer_df.source_skpe != rebel_infer_df.target_skpe]
+
+ kg_df = pd.concat([wikidata_df, rebel_infer_df])
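+
+ # After the concat, kg_df should hold one directed edge per row: source_skpe / target_skpe ids,
+ # source_en / target_en labels, the relation, and a provenance column ('wikidata' or 'rebel_wikipedia'),
+ # which is what the path search and display code below rely on.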
+
+
+ # ???
+
+
+ # load entity linking dictionary
+ linking_df = pd.read_csv('./linking_df_technical_min.csv')
+
+ # User Input
+ input_text = st.text_input(
+     label="Enter first entity name",
+     value="semiconductor",
+     key="ent",
+ )
+
+ # normalise and match
+ text_norm = normalize_text(input_text)
+ match_df = linking_df[linking_df.text == text_norm]
+
+ # top match skpe
+ if len(match_df) > 0:
+
+     top_skpe = match_df.skpe_id.mode()[0]
+     all_skpe = set(match_df.skpe_id.to_list())
+     skpe_to_count = dict(match_df.skpe_id.value_counts())
+
+     # match list, scored by the share of dictionary entries pointing to each candidate
+     wiki_match_df = wiki_df[wiki_df.skpe_id.apply(lambda x: x in all_skpe)].copy()
+     wiki_match_df['link_score'] = wiki_match_df['skpe_id'].apply(lambda x: skpe_to_count[x] / sum(skpe_to_count.values()))
+     wiki_match_df = wiki_match_df.sort_values(by='link_score', ascending=False)
+
+ else:
+     st.write("no matches")
+     # stop here, otherwise wiki_match_df and top_skpe below would be undefined
+     st.stop()
+
+ # show similar results
+ wiki_match_df.sort_values(by='views', ascending=False)[:5]
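+
+ # Path search: starting from the matched entity (top_skpe), follow made_from_material edges
+ # to things made from it, optionally hop to products containing them, and end at
+ # has_use / uses edges, collecting each path for display.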
+
+ # things made from the input material
+ made_of_df = kg_df[(kg_df.relation == 'made_from_material') & (kg_df.target_skpe == top_skpe)].copy()
+ # made_of_list = made_of_df.source_skpe.to_list()
+
+ all_paths = []
+
+
+ # iterate over first-hop edges
+ for first_edge in made_of_df.itertuples():
+
+     first_item = first_edge.source_skpe
+
+     # applications of stuff made out of the first item
+     use_df = kg_df[((kg_df.relation == 'has_use') & (kg_df.source_skpe == first_item)) | ((kg_df.relation == 'uses') & (kg_df.target_skpe == first_item))]
+
+     # add all length-2 paths
+     for second_edge in use_df.itertuples():
+         all_paths.append([first_edge, second_edge])
+
+     # expand to part-of relations
+
+     # products that contain the first item as a part
+     part_df = kg_df[((kg_df.relation == 'has_part') & (kg_df.target_skpe == first_item)) | ((kg_df.relation == 'part_of') & (kg_df.source_skpe == first_item))]
+
+     # iterate over the products containing the first item
+     for second_edge in part_df.itertuples():
+
+         # select the second item (the containing product)
+         second_item = second_edge.source_skpe if second_edge.relation == 'has_part' else second_edge.target_skpe
+
+         # get uses of the second item
+         use_df = kg_df[((kg_df.relation == 'has_use') & (kg_df.source_skpe == second_item)) | ((kg_df.relation == 'uses') & (kg_df.target_skpe == second_item))]
+
+         # add all length-3 paths
+         for third_edge in use_df.itertuples():
+             all_paths.append([first_edge, second_edge, third_edge])
+
+ # print all paths
+ for path in all_paths:
+     for edge in path:
+         st.write(f"{edge.source_en} --{edge.relation}--> {edge.target_en} | source: {edge.source}")
+     st.write("------")
linking_df_technical_min.csv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37a47468ddb08e8c54b8b55d9b488c84359a366ad6c1763b84587cea9afdbaa2
+ size 157547217
utils.py ADDED
@@ -0,0 +1,50 @@
+ import json
+ import re
+
+ import jsonlines
+
+
+ def dump_json(file, path):
+     """Save a json object to path"""
+
+     with open(path, 'w', encoding='utf-8') as f:
+         json.dump(file, f, indent=4, ensure_ascii=False)
+     print("Saved json to path: " + str(path))
+
+
+ def load_json(path):
+     """Load a json object from path"""
+     with open(path, 'rb') as f:
+         data = json.load(f)
+     print("Loaded json from path: " + str(path))
+     return data
+
+
+ def lead_k_sentences(text, k=50):
+     """Select the first k sentences from a Japanese document"""
+
+     DELIMITER = '。'
+
+     if DELIMITER in text:
+         segments = [seg for seg in text.split(DELIMITER)[:k] if len(seg) > 0]
+         return DELIMITER.join(segments) + DELIMITER
+     else:
+         return text
+
+
+ def read_jsonlines(path):
+     """Read a list of objects from a jsonlines file"""
+     with jsonlines.open(path) as reader:
+         lines = [obj for obj in reader]
+     return lines
+
+
+ def write_jsonlines(file, path):
+     """Write a list of objects to a jsonlines file"""
+     with jsonlines.open(path, 'w') as writer:
+         writer.write_all(file)
+
+
+ def normalize_text(s):
+     """Normalize an entity mention for dictionary lookup"""
+     s = str(s)
+     # if not s.isupper():
+     #     s = re.sub(r"(\w)([A-Z])", r"\1 \2", s)  # respace camel case
+     s = re.sub(r'（.*）', '', s)  # remove Japanese full-width brackets
+     s = re.sub(r'\(.*\)', '', s)  # remove English brackets
+     s = s.strip()
+     s = s.replace(' ', '_')
+     s = s.upper()
+     return s
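+
+ # A quick illustration of the normalization (hypothetical input, not from the repo):
+ # normalize_text("Semiconductor (device)")  ->  "SEMICONDUCTOR"
+ # which is presumably the form stored in the `text` column of linking_df_technical_min.csv
+ # that app.py matches user input against.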