Dataset columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (string, 1 class: entailment).
def changelog(self, api_version, doc): """Add a changelog entry for this api.""" doc = textwrap.dedent(doc).strip() self._changelog[api_version] = doc self._changelog_locations[api_version] = get_callsite_location()
Add a changelog entry for this api.
entailment
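The changelog method above clearly belongs to a class; here is a minimal runnable sketch of that context, assuming _changelog and _changelog_locations are plain dicts and treating get_callsite_location() as a stand-in helper (the Api class name and the helper body are hypothetical):

import textwrap

def get_callsite_location():
    # stand-in for the real helper, which records where changelog() was called
    return "<unknown>"

class Api:
    def __init__(self):
        self._changelog = {}
        self._changelog_locations = {}

    def changelog(self, api_version, doc):
        """Add a changelog entry for this api."""
        doc = textwrap.dedent(doc).strip()
        self._changelog[api_version] = doc
        self._changelog_locations[api_version] = get_callsite_location()

api = Api()
api.changelog("1.2", """
    Added the frobnicate endpoint.
""")
assert api._changelog["1.2"] == "Added the frobnicate endpoint."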
def title_prefix(soup): "titlePrefix for article JSON is only articles with certain display_channel values" prefix = None display_channel_match_list = ['feature article', 'insight', 'editorial'] for d_channel in display_channel(soup): if d_channel.lower() in display_channel_match_list: if raw_parser.sub_display_channel(soup): prefix = node_text(first(raw_parser.sub_display_channel(soup))) return prefix
titlePrefix for article JSON is only articles with certain display_channel values
entailment
def title_prefix_json(soup): "titlePrefix with capitalisation changed" prefix = title_prefix(soup) prefix_rewritten = elifetools.json_rewrite.rewrite_json("title_prefix_json", soup, prefix) return prefix_rewritten
titlePrefix with capitalisation changed
entailment
def research_organism(soup): "Find the research-organism from the set of kwd-group tags" if not raw_parser.research_organism_keywords(soup): return [] return list(map(node_text, raw_parser.research_organism_keywords(soup)))
Find the research-organism from the set of kwd-group tags
entailment
def full_research_organism(soup): "research-organism list including inline tags, such as italic" if not raw_parser.research_organism_keywords(soup): return [] return list(map(node_contents_str, raw_parser.research_organism_keywords(soup)))
research-organism list including inline tags, such as italic
entailment
def keywords(soup): """ Find the keywords from the set of kwd-group tags which are typically labelled as the author keywords """ if not raw_parser.author_keywords(soup): return [] return list(map(node_text, raw_parser.author_keywords(soup)))
Find the keywords from the set of kwd-group tags which are typically labelled as the author keywords
entailment
def full_keywords(soup): "author keywords list including inline tags, such as italic" if not raw_parser.author_keywords(soup): return [] return list(map(node_contents_str, raw_parser.author_keywords(soup)))
author keywords list including inline tags, such as italic
entailment
def version_history(soup, html_flag=True): "extract the article version history details" convert = lambda xml_string: xml_to_html(html_flag, xml_string) version_history = [] related_object_tags = raw_parser.related_object(raw_parser.article_meta(soup)) for tag in related_object_tags: article_version = OrderedDict() date_tag = first(raw_parser.date(tag)) if date_tag: copy_attribute(date_tag.attrs, 'date-type', article_version, 'version') (day, month, year) = ymd(date_tag) article_version['day'] = day article_version['month'] = month article_version['year'] = year article_version['date'] = date_struct_nn(year, month, day) copy_attribute(tag.attrs, 'xlink:href', article_version, 'xlink_href') set_if_value(article_version, "comment", convert(node_contents_str(first(raw_parser.comment(tag))))) version_history.append(article_version) return version_history
extract the article version history details
entailment
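A usage sketch for version_history, assuming these functions live in elifetools' parseJATS module and that parse_document() loads a JATS XML file (the file name is hypothetical):

from elifetools import parseJATS as parser

soup = parser.parse_document("elife-00666-v1.xml")  # hypothetical file
for version in parser.version_history(soup):
    # each entry is an OrderedDict built in the loop above
    print(version.get("version"), version.get("date"), version.get("comment"))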
def article_id_list(soup): """return a list of article-id data""" id_list = [] for article_id_tag in raw_parser.article_id(soup): id_details = OrderedDict() set_if_value(id_details, "type", article_id_tag.get("pub-id-type")) set_if_value(id_details, "value", article_id_tag.text) set_if_value(id_details, "assigning-authority", article_id_tag.get("assigning-authority")) id_list.append(id_details) return id_list
return a list of article-id data
entailment
def copyright_holder_json(soup): "for json output add a full stop if the holder ends in et al" holder = None permissions_tag = raw_parser.article_permissions(soup) if permissions_tag: holder = node_text(raw_parser.copyright_holder(permissions_tag)) if holder is not None and holder.endswith('et al'): holder = holder + '.' return holder
for json output add a full stop if the holder ends in et al
entailment
def subject_area(soup): """ Find the subject areas from article-categories subject tags """ subject_area = [] tags = raw_parser.subject_area(soup) for tag in tags: subject_area.append(node_text(tag)) return subject_area
Find the subject areas from article-categories subject tags
entailment
def display_channel(soup): """ Find the subject areas of type display-channel """ display_channel = [] tags = raw_parser.display_channel(soup) for tag in tags: display_channel.append(node_text(tag)) return display_channel
Find the subject areas of type display-channel
entailment
def category(soup): """ Find the category from subject areas """ category = [] tags = raw_parser.category(soup) for tag in tags: category.append(node_text(tag)) return category
Find the category from subject areas
entailment
def ymd(soup): """ Get the year, month and day from child tags """ day = node_text(raw_parser.day(soup)) month = node_text(raw_parser.month(soup)) year = node_text(raw_parser.year(soup)) return (day, month, year)
Get the year, month and day from child tags
entailment
def pub_date(soup): """ Return the publishing date in struct format pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub """ pub_date = first(raw_parser.pub_date(soup, date_type="pub")) if pub_date is None: pub_date = first(raw_parser.pub_date(soup, date_type="publication")) if pub_date is None: return None (day, month, year) = ymd(pub_date) return date_struct(year, month, day)
Return the publishing date in struct format pub_date_date, pub_date_day, pub_date_month, pub_date_year, pub_date_timestamp Default date_type is pub
entailment
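A usage sketch under the same parseJATS assumption; judging by the name, date_struct() returns a time.struct_time, so strftime can format it (the file name and return type are assumptions):

import time
from elifetools import parseJATS as parser

soup = parser.parse_document("elife-00666-v1.xml")  # hypothetical file
date = parser.pub_date(soup)
if date:
    print(time.strftime("%Y-%m-%d", date))  # assumes a time.struct_time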
def pub_dates(soup): """ return a list of all the pub dates """ pub_dates = [] tags = raw_parser.pub_date(soup) for tag in tags: pub_date = OrderedDict() copy_attribute(tag.attrs, 'publication-format', pub_date) copy_attribute(tag.attrs, 'date-type', pub_date) copy_attribute(tag.attrs, 'pub-type', pub_date) for tag_attr in ["date-type", "pub-type"]: if tag_attr in tag.attrs: (day, month, year) = ymd(tag) pub_date['day'] = day pub_date['month'] = month pub_date['year'] = year pub_date['date'] = date_struct_nn(year, month, day) pub_dates.append(pub_date) return pub_dates
return a list of all the pub dates
entailment
def history_date(soup, date_type = None): """ Find a date in the history tag for the specific date_type typical date_type values: received, accepted """ if(date_type == None): return None history_date = raw_parser.history_date(soup, date_type) if history_date is None: return None (day, month, year) = ymd(history_date) return date_struct(year, month, day)
Find a date in the history tag for the specific date_type typical date_type values: received, accepted
entailment
def collection_year(soup): """ Pub date of type collection will hold a year element for VOR articles """ pub_date = first(raw_parser.pub_date(soup, pub_type="collection")) if not pub_date: pub_date = first(raw_parser.pub_date(soup, date_type="collection")) if not pub_date: return None year = None year_tag = raw_parser.year(pub_date) if year_tag: year = int(node_text(year_tag)) return year
Pub date of type collection will hold a year element for VOR articles
entailment
def abstracts(soup): """ Find the article abstract and format it """ abstracts = [] abstract_tags = raw_parser.abstract(soup) for tag in abstract_tags: abstract = {} abstract["abstract_type"] = tag.get("abstract-type") title_tag = raw_parser.title(tag) if title_tag: abstract["title"] = node_text(title_tag) abstract["content"] = None if raw_parser.paragraph(tag): abstract["content"] = "" abstract["full_content"] = "" good_paragraphs = remove_doi_paragraph(raw_parser.paragraph(tag)) # Plain text content glue = "" for p_tag in good_paragraphs: abstract["content"] += glue + node_text(p_tag) glue = " " # Content including markup tags # When more than one paragraph, wrap each in a <p> tag for p_tag in good_paragraphs: abstract["full_content"] += '<p>' + node_contents_str(p_tag) + '</p>' abstracts.append(abstract) return abstracts
Find the article abstract and format it
entailment
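Sketch of the dict shape each abstract gets from the loop above (same parser-module and file-name assumptions as the earlier sketches):

from elifetools import parseJATS as parser

soup = parser.parse_document("elife-00666-v1.xml")  # hypothetical file
for abstract in parser.abstracts(soup):
    print(abstract.get("abstract_type"), abstract.get("title"))
    print(abstract.get("content"))       # plain text paragraphs joined with spaces
    print(abstract.get("full_content"))  # <p>-wrapped markup, may be None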
def component_doi(soup): """ Look for all object-id of pub-id-type = doi, these are the component DOI tags """ component_doi = [] object_id_tags = raw_parser.object_id(soup, pub_id_type = "doi") # Get components too for later component_list = components(soup) position = 1 for tag in object_id_tags: component_object = {} component_object["doi"] = doi_uri_to_doi(tag.text) component_object["position"] = position # Try to find the type of component for component in component_list: if "doi" in component and component["doi"] == component_object["doi"]: component_object["type"] = component["type"] component_doi.append(component_object) position = position + 1 return component_doi
Look for all object-id of pub-id-type = doi, these are the component DOI tags
entailment
def tag_details(tag, nodenames): """ Used in media and graphics to extract data from their parent tags """ details = {} details['type'] = tag.name details['ordinal'] = tag_ordinal(tag) # Ordinal value if tag_details_sibling_ordinal(tag): details['sibling_ordinal'] = tag_details_sibling_ordinal(tag) # Asset name if tag_details_asset(tag): details['asset'] = tag_details_asset(tag) object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi")) if object_id_tag: details['component_doi'] = extract_component_doi(tag, nodenames) return details
Used in media and graphics to extract data from their parent tags
entailment
def media(soup): """ All media tags and some associated data about the related component doi and the parent of that doi (not always present) """ media = [] media_tags = raw_parser.media(soup) position = 1 for tag in media_tags: media_item = {} copy_attribute(tag.attrs, 'mime-subtype', media_item) copy_attribute(tag.attrs, 'mimetype', media_item) copy_attribute(tag.attrs, 'xlink:href', media_item, 'xlink_href') copy_attribute(tag.attrs, 'content-type', media_item) nodenames = ["sub-article", "media", "fig-group", "fig", "supplementary-material"] details = tag_details(tag, nodenames) copy_attribute(details, 'component_doi', media_item) copy_attribute(details, 'type', media_item) copy_attribute(details, 'sibling_ordinal', media_item) # Try to get the component DOI of the parent tag parent_tag = first_parent(tag, nodenames) if parent_tag: acting_parent_tag = component_acting_parent_tag(parent_tag, tag) if acting_parent_tag: details = tag_details(acting_parent_tag, nodenames) copy_attribute(details, 'type', media_item, 'parent_type') copy_attribute(details, 'ordinal', media_item, 'parent_ordinal') copy_attribute(details, 'asset', media_item, 'parent_asset') copy_attribute(details, 'sibling_ordinal', media_item, 'parent_sibling_ordinal') copy_attribute(details, 'component_doi', media_item, 'parent_component_doi') # Try to get the parent parent p_parent_tag = first_parent(parent_tag, nodenames) if p_parent_tag: acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag) if acting_p_parent_tag: details = tag_details(acting_p_parent_tag, nodenames) copy_attribute(details, 'type', media_item, 'p_parent_type') copy_attribute(details, 'ordinal', media_item, 'p_parent_ordinal') copy_attribute(details, 'asset', media_item, 'p_parent_asset') copy_attribute(details, 'sibling_ordinal', media_item, 'p_parent_sibling_ordinal') copy_attribute(details, 'component_doi', media_item, 'p_parent_component_doi') # Try to get the parent parent parent p_p_parent_tag = first_parent(p_parent_tag, nodenames) if p_p_parent_tag: acting_p_p_parent_tag = component_acting_parent_tag(p_p_parent_tag, p_parent_tag) if acting_p_p_parent_tag: details = tag_details(acting_p_p_parent_tag, nodenames) copy_attribute(details, 'type', media_item, 'p_p_parent_type') copy_attribute(details, 'ordinal', media_item, 'p_p_parent_ordinal') copy_attribute(details, 'asset', media_item, 'p_p_parent_asset') copy_attribute(details, 'sibling_ordinal', media_item, 'p_p_parent_sibling_ordinal') copy_attribute(details, 'component_doi', media_item, 'p_p_parent_component_doi') # Increment the position media_item['position'] = position # Ordinal should be the same as position in this case but set it anyway media_item['ordinal'] = tag_ordinal(tag) media.append(media_item) position += 1 return media
All media tags and some associated data about the related component doi and the parent of that doi (not always present)
entailment
def graphics(soup): """ All graphic tags and some associated data about the related component doi and the parent of that doi (not always present), and whether it is part of a figure supplement """ graphics = [] graphic_tags = raw_parser.graphic(soup) position = 1 for tag in graphic_tags: graphic_item = {} copy_attribute(tag.attrs, 'xlink:href', graphic_item, 'xlink_href') # Get the tag type nodenames = ["sub-article", "fig-group", "fig", "app"] details = tag_details(tag, nodenames) copy_attribute(details, 'type', graphic_item) parent_tag = first_parent(tag, nodenames) if parent_tag: details = tag_details(parent_tag, nodenames) copy_attribute(details, 'type', graphic_item, 'parent_type') copy_attribute(details, 'ordinal', graphic_item, 'parent_ordinal') copy_attribute(details, 'asset', graphic_item, 'parent_asset') copy_attribute(details, 'sibling_ordinal', graphic_item, 'parent_sibling_ordinal') copy_attribute(details, 'component_doi', graphic_item, 'parent_component_doi') # Try to get the parent parent - special for looking at fig tags # use component_acting_parent_tag p_parent_tag = first_parent(parent_tag, nodenames) if p_parent_tag: acting_p_parent_tag = component_acting_parent_tag(p_parent_tag, parent_tag) if acting_p_parent_tag: details = tag_details(acting_p_parent_tag, nodenames) copy_attribute(details, 'type', graphic_item, 'p_parent_type') copy_attribute(details, 'ordinal', graphic_item, 'p_parent_ordinal') copy_attribute(details, 'asset', graphic_item, 'p_parent_asset') copy_attribute(details, 'sibling_ordinal', graphic_item, 'p_parent_sibling_ordinal') copy_attribute(details, 'component_doi', graphic_item, 'p_parent_component_doi') # Increment the position graphic_item['position'] = position # Ordinal should be the same as position in this case but set it anyway graphic_item['ordinal'] = tag_ordinal(tag) graphics.append(graphic_item) position += 1 return graphics
All graphic tags and some associated data about the related component doi and the parent of that doi (not always present), and whether it is part of a figure supplement
entailment
def inline_graphics(soup): """ inline-graphic tags """ inline_graphics = [] inline_graphic_tags = raw_parser.inline_graphic(soup) position = 1 for tag in inline_graphic_tags: item = {} copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href') # Get the tag type nodenames = ["sub-article"] details = tag_details(tag, nodenames) copy_attribute(details, 'type', item) # Increment the position item['position'] = position # Ordinal should be the same as position in this case but set it anyway item['ordinal'] = tag_ordinal(tag) inline_graphics.append(item) position += 1 return inline_graphics
inline-graphic tags
entailment
def self_uri(soup): """ self-uri tags """ self_uri = [] self_uri_tags = raw_parser.self_uri(soup) position = 1 for tag in self_uri_tags: item = {} copy_attribute(tag.attrs, 'xlink:href', item, 'xlink_href') copy_attribute(tag.attrs, 'content-type', item) # Get the tag type nodenames = ["sub-article"] details = tag_details(tag, nodenames) copy_attribute(details, 'type', item) # Increment the position item['position'] = position # Ordinal should be the same as position in this case but set it anyway item['ordinal'] = tag_ordinal(tag) self_uri.append(item) position += 1 return self_uri
self-uri tags
entailment
def supplementary_material(soup): """ supplementary-material tags """ supplementary_material = [] supplementary_material_tags = raw_parser.supplementary_material(soup) position = 1 for tag in supplementary_material_tags: item = {} copy_attribute(tag.attrs, 'id', item) # Get the tag type nodenames = ["supplementary-material"] details = tag_details(tag, nodenames) copy_attribute(details, 'type', item) copy_attribute(details, 'asset', item) copy_attribute(details, 'component_doi', item) copy_attribute(details, 'sibling_ordinal', item) if raw_parser.label(tag): item['label'] = node_text(raw_parser.label(tag)) item['full_label'] = node_contents_str(raw_parser.label(tag)) # Increment the position item['position'] = position # Ordinal should be the same as position in this case but set it anyway item['ordinal'] = tag_ordinal(tag) supplementary_material.append(item) position += 1 return supplementary_material
supplementary-material tags
entailment
def contrib_email(contrib_tag): """ Given a contrib tag, look for an email tag, and only return the value if it is not inside an aff tag """ email = [] for email_tag in extract_nodes(contrib_tag, "email"): if email_tag.parent.name != "aff": email.append(email_tag.text) return email if len(email) > 0 else None
Given a contrib tag, look for an email tag, and only return the value if it is not inside an aff tag
entailment
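The not-inside-an-aff rule above can be reproduced standalone with BeautifulSoup (self-contained sketch; the XML parser requires lxml):

from bs4 import BeautifulSoup

xml = ('<contrib><email>a@example.org</email>'
       '<aff><email>b@example.org</email></aff></contrib>')
contrib_tag = BeautifulSoup(xml, "lxml-xml").find("contrib")
# keep only email tags whose direct parent is not an aff tag
emails = [e.text for e in contrib_tag.find_all("email") if e.parent.name != "aff"]
assert emails == ["a@example.org"]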
def contrib_phone(contrib_tag): """ Given a contrib tag, look for a phone tag """ phone = None if raw_parser.phone(contrib_tag): phone = first(raw_parser.phone(contrib_tag)).text return phone
Given a contrib tag, look for a phone tag
entailment
def contrib_inline_aff(contrib_tag): """ Given a contrib tag, look for an aff tag directly inside it """ aff_tags = [] for child_tag in contrib_tag: if child_tag and child_tag.name and child_tag.name == "aff": aff_tags.append(child_tag) return aff_tags
Given a contrib tag, look for an aff tag directly inside it
entailment
def contrib_xref(contrib_tag, ref_type): """ Given a contrib tag, look for an xref tag of type ref_type directly inside the contrib tag """ aff_tags = [] for child_tag in contrib_tag: if (child_tag and child_tag.name and child_tag.name == "xref" and child_tag.get('ref-type') and child_tag.get('ref-type') == ref_type): aff_tags.append(child_tag) return aff_tags
Given a contrib tag, look for an xref tag of type ref_type directly inside the contrib tag
entailment
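Likewise, the direct-children-only filter used by contrib_inline_aff and contrib_xref can be checked standalone (same bs4/lxml assumption):

from bs4 import BeautifulSoup

xml = ('<contrib><xref ref-type="aff" rid="aff1"/>'
       '<name><xref ref-type="fn" rid="fn1"/></name></contrib>')
contrib_tag = BeautifulSoup(xml, "lxml-xml").find("contrib")
# iterating the tag yields direct children only, so the nested fn xref is skipped
direct = [t for t in contrib_tag
          if getattr(t, "name", None) == "xref" and t.get("ref-type") == "aff"]
assert [t.get("rid") for t in direct] == ["aff1"]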
def all_contributors(soup, detail="brief"): "find all contributors not constrained to only the ones in article meta" contrib_tags = raw_parser.contributors(soup) contributors = format_authors(soup, contrib_tags, detail) return contributors
find all contributors not constrained to only the ones in article meta
entailment
def authors_non_byline(soup, detail="full"): """Non-byline authors for group author members""" # Get a filtered list of contributors, in order to get their group-author-id contrib_type = "author non-byline" contributors_ = contributors(soup, detail) non_byline_authors = [author for author in contributors_ if author.get('type', None) == contrib_type] # Then renumber their position attribute position = 1 for author in non_byline_authors: author["position"] = position position = position + 1 return non_byline_authors
Non-byline authors for group author members
entailment
def refs(soup): """Find and return all the references""" tags = raw_parser.ref_list(soup) refs = [] position = 1 article_doi = doi(soup) for tag in tags: ref = {} ref['ref'] = ref_text(tag) # ref_id copy_attribute(tag.attrs, "id", ref) # article_title if raw_parser.article_title(tag): ref['article_title'] = node_text(raw_parser.article_title(tag)) ref['full_article_title'] = node_contents_str(raw_parser.article_title(tag)) if raw_parser.pub_id(tag, "pmid"): ref['pmid'] = node_contents_str(first(raw_parser.pub_id(tag, "pmid"))) if raw_parser.pub_id(tag, "isbn"): ref['isbn'] = node_contents_str(first(raw_parser.pub_id(tag, "isbn"))) if raw_parser.pub_id(tag, "doi"): ref['reference_id'] = node_contents_str(first(raw_parser.pub_id(tag, "doi"))) ref['doi'] = doi_uri_to_doi(node_contents_str(first(raw_parser.pub_id(tag, "doi")))) uri_tag = None if raw_parser.ext_link(tag, "uri"): uri_tag = first(raw_parser.ext_link(tag, "uri")) elif raw_parser.uri(tag): uri_tag = first(raw_parser.uri(tag)) if uri_tag: set_if_value(ref, "uri", uri_tag.get('xlink:href')) set_if_value(ref, "uri_text", node_contents_str(uri_tag)) # look for a pub-id tag if no uri yet if not ref.get('uri') and raw_parser.pub_id(tag, "archive"): pub_id_tag = first(raw_parser.pub_id(tag, pub_id_type="archive")) set_if_value(ref, "uri", pub_id_tag.get('xlink:href')) # accession, could be in either of two tags set_if_value(ref, "accession", node_contents_str(first(raw_parser.object_id(tag, "art-access-id")))) if not ref.get('accession'): set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="accession")))) if not ref.get('accession'): set_if_value(ref, "accession", node_contents_str(first(raw_parser.pub_id(tag, pub_id_type="archive")))) if(raw_parser.year(tag)): set_if_value(ref, "year", node_text(raw_parser.year(tag))) set_if_value(ref, "year-iso-8601-date", raw_parser.year(tag).get('iso-8601-date')) if(raw_parser.date_in_citation(tag)): set_if_value(ref, "date-in-citation", node_text(first(raw_parser.date_in_citation(tag)))) set_if_value(ref, "iso-8601-date", first(raw_parser.date_in_citation(tag)).get('iso-8601-date')) if(raw_parser.patent(tag)): set_if_value(ref, "patent", node_text(first(raw_parser.patent(tag)))) set_if_value(ref, "country", first(raw_parser.patent(tag)).get('country')) set_if_value(ref, "source", node_text(first(raw_parser.source(tag)))) set_if_value(ref, "elocation-id", node_text(first(raw_parser.elocation_id(tag)))) if raw_parser.element_citation(tag): copy_attribute(first(raw_parser.element_citation(tag)).attrs, "publication-type", ref) if "publication-type" not in ref and raw_parser.mixed_citations(tag): copy_attribute(first(raw_parser.mixed_citations(tag)).attrs, "publication-type", ref) # authors person_group = raw_parser.person_group(tag) authors = [] for group in person_group: author_type = None if "person-group-type" in group.attrs: author_type = group["person-group-type"] # Read name or collab tag in the order they are listed for name_or_collab_tag in extract_nodes(group, ["name", "string-name", "collab"]): author = {} # Shared tag attribute set_if_value(author, "group-type", author_type) # name tag attributes if name_or_collab_tag.name in ["name", "string-name"]: set_if_value(author, "surname", node_text(first(raw_parser.surname(name_or_collab_tag)))) set_if_value(author, "given-names", node_text(first(raw_parser.given_names(name_or_collab_tag)))) set_if_value(author, "suffix", node_text(first(raw_parser.suffix(name_or_collab_tag)))) # collab tag attribute if name_or_collab_tag.name == "collab": set_if_value(author, "collab", node_contents_str(name_or_collab_tag)) if len(author) > 0: authors.append(author) # etal for the person group if first(raw_parser.etal(group)): author = {} author['etal'] = True set_if_value(author, "group-type", author_type) authors.append(author) # Check for collab tag not wrapped in a person-group for backwards compatibility if len(person_group) == 0: collab_tags = raw_parser.collab(tag) for collab_tag in collab_tags: author = {} set_if_value(author, "group-type", "author") set_if_value(author, "collab", node_contents_str(collab_tag)) if len(author) > 0: authors.append(author) if len(authors) > 0: ref['authors'] = authors set_if_value(ref, "volume", node_text(first(raw_parser.volume(tag)))) set_if_value(ref, "issue", node_text(first(raw_parser.issue(tag)))) set_if_value(ref, "fpage", node_text(first(raw_parser.fpage(tag)))) set_if_value(ref, "lpage", node_text(first(raw_parser.lpage(tag)))) set_if_value(ref, "collab", node_text(first(raw_parser.collab(tag)))) set_if_value(ref, "publisher_loc", node_text(first(raw_parser.publisher_loc(tag)))) set_if_value(ref, "publisher_name", node_text(first(raw_parser.publisher_name(tag)))) set_if_value(ref, "edition", node_contents_str(first(raw_parser.edition(tag)))) set_if_value(ref, "version", node_contents_str(first(raw_parser.version(tag)))) set_if_value(ref, "chapter-title", node_contents_str(first(raw_parser.chapter_title(tag)))) set_if_value(ref, "comment", node_text(first(raw_parser.comment(tag)))) set_if_value(ref, "data-title", node_contents_str(first(raw_parser.data_title(tag)))) set_if_value(ref, "conf-name", node_text(first(raw_parser.conf_name(tag)))) # If not empty, add position value, append, then increment the position counter if(len(ref) > 0): ref['article_doi'] = article_doi ref['position'] = position refs.append(ref) position += 1 return refs
Find and return all the references
entailment
def extract_component_doi(tag, nodenames): """ Used to get component DOI from a tag and confirm it is actually for that tag and it is not for one of its children in the list of nodenames """ component_doi = None if(tag.name == "sub-article"): component_doi = doi_uri_to_doi(node_text(first(raw_parser.article_id(tag, pub_id_type= "doi")))) else: object_id_tag = first(raw_parser.object_id(tag, pub_id_type= "doi")) # Tweak: if it is media and has no object_id_tag then it is not a "component" if tag.name == "media" and not object_id_tag: component_doi = None else: # Check the object id is for this tag and not one of its children # This happens for example when boxed text has a child figure, # the boxed text does not have a DOI, the figure does have one if object_id_tag and first_parent(object_id_tag, nodenames).name == tag.name: component_doi = doi_uri_to_doi(node_text(object_id_tag)) return component_doi
Used to get component DOI from a tag and confirm it is actually for that tag and it is not for one of its children in the list of nodenames
entailment
def components(soup): """ Find the components, i.e. those parts that would be assigned a unique component DOI, such as figures, tables, etc. - position is in what order the tag appears in the entire set of nodes - ordinal is in what order it is for all the tags of its own type """ components = [] nodenames = ["abstract", "fig", "table-wrap", "media", "chem-struct-wrap", "sub-article", "supplementary-material", "boxed-text", "app"] # Count node order overall position = 1 position_by_type = {} for nodename in nodenames: position_by_type[nodename] = 1 article_doi = doi(soup) # Find all tags for all component_types, allows the order # in which they are found to be preserved component_tags = extract_nodes(soup, nodenames) for tag in component_tags: component = OrderedDict() # Component type is the tag's name ctype = tag.name # First find the doi if present component_doi = extract_component_doi(tag, nodenames) if component_doi is None: continue else: component['doi'] = doi_uri_to_doi(component_doi) component['doi_url'] = doi_to_doi_uri(component['doi']) copy_attribute(tag.attrs, 'id', component) if(ctype == "sub-article"): title_tag = raw_parser.article_title(tag) elif(ctype == "boxed-text"): title_tag = title_tag_inspected(tag, tag.name, direct_sibling_only=True) if not title_tag: title_tag = title_tag_inspected(tag, "caption", "boxed-text") # New kitchen sink has boxed-text inside app tags, take the sec tag title if so # but do not take it if there is a caption if (not title_tag and tag.parent and tag.parent.name in ["sec", "app"] and not caption_tag_inspected(tag, tag.name)): title_tag = title_tag_inspected(tag.parent, tag.parent.name, direct_sibling_only=True) else: title_tag = raw_parser.title(tag) if title_tag: component['title'] = node_text(title_tag) component['full_title'] = node_contents_str(title_tag) if ctype == "boxed-text": label_tag = label_tag_inspected(tag, "boxed-text") else: label_tag = raw_parser.label(tag) if label_tag: component['label'] = node_text(label_tag) component['full_label'] = node_contents_str(label_tag) if raw_parser.caption(tag): first_paragraph = first(paragraphs(raw_parser.caption(tag))) # fix a problem with the new kitchen sink of caption within caption tag if first_paragraph: nested_caption = raw_parser.caption(first_paragraph) if nested_caption: nested_paragraphs = paragraphs(nested_caption) first_paragraph = first(nested_paragraphs) or first_paragraph if first_paragraph and not starts_with_doi(first_paragraph): # Remove the supplementary tag from the paragraph if present if raw_parser.supplementary_material(first_paragraph): first_paragraph = remove_tag_from_tag(first_paragraph, 'supplementary-material') if node_text(first_paragraph).strip(): component['caption'] = node_text(first_paragraph) component['full_caption'] = node_contents_str(first_paragraph) if raw_parser.permissions(tag): component['permissions'] = [] for permissions_tag in raw_parser.permissions(tag): permissions_item = {} if raw_parser.copyright_statement(permissions_tag): permissions_item['copyright_statement'] = \ node_text(raw_parser.copyright_statement(permissions_tag)) if raw_parser.copyright_year(permissions_tag): permissions_item['copyright_year'] = \ node_text(raw_parser.copyright_year(permissions_tag)) if raw_parser.copyright_holder(permissions_tag): permissions_item['copyright_holder'] = \ node_text(raw_parser.copyright_holder(permissions_tag)) if raw_parser.licence_p(permissions_tag): permissions_item['license'] = \ node_text(first(raw_parser.licence_p(permissions_tag))) permissions_item['full_license'] = \ node_contents_str(first(raw_parser.licence_p(permissions_tag))) component['permissions'].append(permissions_item) if raw_parser.contributors(tag): component['contributors'] = [] for contributor_tag in raw_parser.contributors(tag): component['contributors'].append(format_contributor(contributor_tag, soup)) # There are only some parent tags we care about for components # and only check two levels of parentage parent_nodenames = ["sub-article", "fig-group", "fig", "boxed-text", "table-wrap", "app", "media"] parent_tag = first_parent(tag, parent_nodenames) if parent_tag: # For fig-group we actually want the first fig of the fig-group as the parent acting_parent_tag = component_acting_parent_tag(parent_tag, tag) # Only counts if the acting parent tag has a DOI if (acting_parent_tag and \ extract_component_doi(acting_parent_tag, parent_nodenames) is not None): component['parent_type'] = acting_parent_tag.name component['parent_ordinal'] = tag_ordinal(acting_parent_tag) component['parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag) component['parent_asset'] = tag_details_asset(acting_parent_tag) # Look for parent parent, if available parent_parent_tag = first_parent(parent_tag, parent_nodenames) if parent_parent_tag: acting_parent_tag = component_acting_parent_tag(parent_parent_tag, parent_tag) if (acting_parent_tag and \ extract_component_doi(acting_parent_tag, parent_nodenames) is not None): component['parent_parent_type'] = acting_parent_tag.name component['parent_parent_ordinal'] = tag_ordinal(acting_parent_tag) component['parent_parent_sibling_ordinal'] = tag_details_sibling_ordinal(acting_parent_tag) component['parent_parent_asset'] = tag_details_asset(acting_parent_tag) content = "" for p_tag in extract_nodes(tag, "p"): if content != "": # Add a space before each new paragraph for now content = content + " " content = content + node_text(p_tag) if(content != ""): component['content'] = content # mime type media_tag = None if(ctype == "media"): media_tag = tag elif(ctype == "supplementary-material"): media_tag = first(raw_parser.media(tag)) if media_tag: component['mimetype'] = media_tag.get("mimetype") component['mime-subtype'] = media_tag.get("mime-subtype") if(len(component) > 0): component['article_doi'] = article_doi component['type'] = ctype component['position'] = position # Ordinal is based on all tags of the same type even if they have no DOI component['ordinal'] = tag_ordinal(tag) component['sibling_ordinal'] = tag_details_sibling_ordinal(tag) component['asset'] = tag_details_asset(tag) #component['ordinal'] = position_by_type[ctype] components.append(component) position += 1 position_by_type[ctype] += 1 return components
Find the components, i.e. those parts that would be assigned a unique component DOI, such as figures, tables, etc. - position is in what order the tag appears in the entire set of nodes - ordinal is in what order it is for all the tags of its own type
entailment
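Usage sketch for components (same parser-module and file-name assumptions): every returned item carries the component DOI plus the position/ordinal bookkeeping the docstring describes.

from elifetools import parseJATS as parser

soup = parser.parse_document("elife-00666-v1.xml")  # hypothetical file
for component in parser.components(soup):
    print(component["doi"], component["type"], component["position"], component["ordinal"])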
def correspondence(soup): """ Find the corresp tags included in author-notes for primary correspondence """ correspondence = [] author_notes_nodes = raw_parser.author_notes(soup) if author_notes_nodes: corresp_nodes = raw_parser.corresp(author_notes_nodes) for tag in corresp_nodes: correspondence.append(tag.text) return correspondence
Find the corresp tags included in author-notes for primary correspondence
entailment
def author_notes(soup): """ Find the fn tags included in author-notes """ author_notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) for tag in fn_nodes: if 'fn-type' in tag.attrs: if(tag['fn-type'] != 'present-address'): author_notes.append(node_text(tag)) return author_notes
Find the fn tags included in author-notes
entailment
def full_author_notes(soup, fntype_filter=None): """ Find the fn tags included in author-notes """ notes = [] author_notes_section = raw_parser.author_notes(soup) if author_notes_section: fn_nodes = raw_parser.fn(author_notes_section) notes = footnotes(fn_nodes, fntype_filter) return notes
Find the fn tags included in author-notes
entailment
def competing_interests(soup, fntype_filter): """ Find the fn tags included in the competing interest """ competing_interests_section = extract_nodes(soup, "fn-group", attr="content-type", value="competing-interest") if not competing_interests_section: return None fn = extract_nodes(first(competing_interests_section), "fn") interests = footnotes(fn, fntype_filter) return interests
Find the fn tags included in the competing interest
entailment
def author_contributions(soup, fntype_filter): """ Find the fn tags included in the author contributions """ author_contributions_section = extract_nodes(soup, "fn-group", attr="content-type", value="author-contribution") if not author_contributions_section: return None fn = extract_nodes(first(author_contributions_section), "fn") cons = footnotes(fn, fntype_filter) return cons
Find the fn tags included in the author contributions
entailment
def full_award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") # counter for auto generated id values, if required generated_id_counter = 1 for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: if 'id' in ag.attrs: ref = ag['id'] else: # hack: generate and increment an id value if none is available ref = "award-group-{id}".format(id=generated_id_counter) generated_id_counter += 1 award_group = {} award_group_id = award_group_award_id(ag) if award_group_id is not None: award_group['award-id'] = first(award_group_id) funding_sources = full_award_group_funding_source(ag) source = first(funding_sources) if source is not None: copy_attribute(source, 'institution', award_group) copy_attribute(source, 'institution-id', award_group, 'id') copy_attribute(source, 'institution-id-type', award_group, destination_key='id-type') award_group_by_ref = {} award_group_by_ref[ref] = award_group award_groups.append(award_group_by_ref) return award_groups
Find the award-group items and return a list of details
entailment
def award_groups(soup): """ Find the award-group items and return a list of details """ award_groups = [] funding_group_section = extract_nodes(soup, "funding-group") for fg in funding_group_section: award_group_tags = extract_nodes(fg, "award-group") for ag in award_group_tags: award_group = {} award_group['funding_source'] = award_group_funding_source(ag) award_group['recipient'] = award_group_principal_award_recipient(ag) award_group['award_id'] = award_group_award_id(ag) award_groups.append(award_group) return award_groups
Find the award-group items and return a list of details
entailment
def award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_source = [] funding_source_tags = extract_nodes(tag, "funding-source") for t in funding_source_tags: award_group_funding_source.append(t.text) return award_group_funding_source
Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section
entailment
def full_award_group_funding_source(tag): """ Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section """ award_group_funding_sources = [] funding_source_nodes = extract_nodes(tag, "funding-source") for funding_source_node in funding_source_nodes: award_group_funding_source = {} institution_nodes = extract_nodes(funding_source_node, 'institution') institution_node = first(institution_nodes) if institution_node: award_group_funding_source['institution'] = node_text(institution_node) if 'content-type' in institution_node.attrs: award_group_funding_source['institution-type'] = institution_node['content-type'] institution_id_nodes = extract_nodes(funding_source_node, 'institution-id') institution_id_node = first(institution_id_nodes) if institution_id_node: award_group_funding_source['institution-id'] = node_text(institution_id_node) if 'institution-id-type' in institution_id_node.attrs: award_group_funding_source['institution-id-type'] = institution_id_node['institution-id-type'] award_group_funding_sources.append(award_group_funding_source) return award_group_funding_sources
Given a funding group element, find the award group funding sources, one for each item found in the get_funding_group section
entailment
def award_group_award_id(tag): """ Find the award group award id, one for each item found in the get_funding_group section """ award_group_award_id = [] award_id_tags = extract_nodes(tag, "award-id") for t in award_id_tags: award_group_award_id.append(t.text) return award_group_award_id
Find the award group award id, one for each item found in the get_funding_group section
entailment
def award_group_principal_award_recipient(tag): """ Find the award group principal award recipient, one for each item found in the get_funding_group section """ award_group_principal_award_recipient = [] principal_award_recipients = extract_nodes(tag, "principal-award-recipient") for t in principal_award_recipients: principal_award_recipient_text = "" institution = node_text(first(extract_nodes(t, "institution"))) surname = node_text(first(extract_nodes(t, "surname"))) given_names = node_text(first(extract_nodes(t, "given-names"))) string_name = node_text(first(raw_parser.string_name(t))) # Concatenate name and institution values if found # while filtering out excess whitespace if(given_names): principal_award_recipient_text += given_names if(principal_award_recipient_text != ""): principal_award_recipient_text += " " if(surname): principal_award_recipient_text += surname if(institution): principal_award_recipient_text += institution if(string_name): principal_award_recipient_text += string_name award_group_principal_award_recipient.append(principal_award_recipient_text) return award_group_principal_award_recipient
Find the award group principal award recipient, one for each item found in the get_funding_group section
entailment
def object_id_doi(tag, parent_tag_name=None): """DOI in an object-id tag found inside the tag""" doi = None object_id = None object_ids = raw_parser.object_id(tag, "doi") if object_ids: object_id = first([id_ for id_ in object_ids]) if parent_tag_name and object_id and object_id.parent.name != parent_tag_name: object_id = None if object_id: doi = node_contents_str(object_id) return doi
DOI in an object-id tag found inside the tag
entailment
def title_tag_inspected(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the title tag and sometimes inspect its parents""" title_tag = None if direct_sibling_only is True: for sibling_tag in tag: if sibling_tag.name and sibling_tag.name == "title": title_tag = sibling_tag else: title_tag = raw_parser.title(tag) if parent_tag_name and p_parent_tag_name: if (title_tag and title_tag.parent.name and title_tag.parent.parent.name and title_tag.parent.name == parent_tag_name and title_tag.parent.parent.name == p_parent_tag_name): pass else: title_tag = None return title_tag
Extract the title tag and sometimes inspect its parents
entailment
def title_text(tag, parent_tag_name=None, p_parent_tag_name=None, direct_sibling_only=False): """Extract the text of a title tag and sometimes inspect its parents""" title = None title_tag = title_tag_inspected(tag, parent_tag_name, p_parent_tag_name, direct_sibling_only) if title_tag: title = node_contents_str(title_tag) return title
Extract the text of a title tag and sometimes inspect its parents
entailment
def boxed_text_to_image_block(tag): "convert boxed-text to an image block containing an inline-graphic" tag_block = OrderedDict() image_content = body_block_image_content(first(raw_parser.inline_graphic(tag))) tag_block["type"] = "image" set_if_value(tag_block, "doi", doi_uri_to_doi(object_id_doi(tag, tag.name))) set_if_value(tag_block, "id", tag.get("id")) set_if_value(tag_block, "image", image_content) # render paragraphs into a caption p_tags = raw_parser.paragraph(tag) caption_content = [] for p_tag in p_tags: if not raw_parser.inline_graphic(p_tag): caption_content.append(body_block_content(p_tag)) set_if_value(tag_block, "caption", caption_content) return tag_block
convert boxed-text to an image block containing an inline-graphic
entailment
def body_json(soup, base_url=None): """ Get body json and then alter it with section wrapping and removing boxed-text """ body_content = body(soup, remove_key_info_box=True, base_url=base_url) # Wrap in a section if the first block is not a section if (body_content and len(body_content) > 0 and "type" in body_content[0] and body_content[0]["type"] != "section"): # Wrap this one new_body_section = OrderedDict() new_body_section["type"] = "section" new_body_section["id"] = "s0" new_body_section["title"] = "Main text" new_body_section["content"] = [] for body_block in body_content: new_body_section["content"].append(body_block) new_body = [] new_body.append(new_body_section) body_content = new_body body_content_rewritten = elifetools.json_rewrite.rewrite_json("body_json", soup, body_content) return body_content_rewritten
Get body json and then alter it with section wrapping and removing boxed-text
entailment
def body_block_content_render(tag, recursive=False, base_url=None): """ Render the tag as body content and call recursively if the tag has child tags """ block_content_list = [] tag_content = OrderedDict() if tag.name == "p": for block_content in body_block_paragraph_render(tag, base_url=base_url): if block_content != {}: block_content_list.append(block_content) else: tag_content = body_block_content(tag, base_url=base_url) nodenames = body_block_nodenames() tag_content_content = [] # Collect the content of the tag but only for some tags if tag.name not in ["p", "fig", "table-wrap", "list", "media", "disp-quote", "code"]: for child_tag in tag: if not(hasattr(child_tag, 'name')): continue if child_tag.name == "p": # Ignore paragraphs that start with DOI: if node_text(child_tag) and len(remove_doi_paragraph([child_tag])) <= 0: continue for block_content in body_block_paragraph_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) elif child_tag.name == "fig" and tag.name == "fig-group": # Do not render a fig inside fig-group a second time pass elif child_tag.name == "media" and tag.name == "fig-group": # Do not include a media video inside fig-group a second time if child_tag.get("mimetype") == "video": pass else: for block_content in body_block_content_render(child_tag, recursive=True, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) if len(tag_content_content) > 0: if tag.name in nodenames or recursive is False: tag_content["content"] = [] for block_content in tag_content_content: tag_content["content"].append(block_content) block_content_list.append(tag_content) else: # Not a block tag, e.g. a caption tag, let the content pass through block_content_list = tag_content_content else: block_content_list.append(tag_content) return block_content_list
Render the tag as body content and call recursively if the tag has child tags
entailment
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None): """ paragraphs may wrap some other body block content this is separated out so it can be called from more than one place """ # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url) block_content_list = [] tag_content_content = [] nodenames = body_block_nodenames() paragraph_content = u'' for child_tag in p_tag: if child_tag.name is None or body_block_content(child_tag) == {}: paragraph_content = paragraph_content + unicode_value(child_tag) else: # Add previous paragraph content first if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) paragraph_content = u'' if child_tag.name is not None and body_block_content(child_tag) != {}: for block_content in body_block_content_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) # finish up if paragraph_content.strip() != '': tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) if len(tag_content_content) > 0: for block_content in tag_content_content: block_content_list.append(block_content) return block_content_list
paragraphs may wrap some other body block content this is separated out so it can be called from more than one place
entailment
def body_block_caption_render(caption_tags, base_url=None): """fig and media tag captions are similar so use this common function""" caption_content = [] supplementary_material_tags = [] for block_tag in remove_doi_paragraph(caption_tags): # Note then skip p tags with supplementary-material inside if raw_parser.supplementary_material(block_tag): for supp_tag in raw_parser.supplementary_material(block_tag): supplementary_material_tags.append(supp_tag) continue for block_content in body_block_content_render(block_tag, base_url=base_url): if block_content != {}: caption_content.append(block_content) return caption_content, supplementary_material_tags
fig and media tag captions are similar so use this common function
entailment
def body_block_supplementary_material_render(supp_tags, base_url=None): """fig and media tag caption may have supplementary material""" source_data = [] for supp_tag in supp_tags: for block_content in body_block_content_render(supp_tag, base_url=base_url): if block_content != {}: if "content" in block_content: del block_content["content"] source_data.append(block_content) return source_data
fig and media tag caption may have supplementary material
entailment
def body_block_paragraph_content(text): "for formatting of simple paragraphs of text only, and check if it is all whitespace" tag_content = OrderedDict() if text and text != '': tag_content["type"] = "paragraph" tag_content["text"] = clean_whitespace(text) return tag_content
for formatting of simple paragraphs of text only, and check if it is all whitespace
entailment
def body_block_image_content(tag): "format a graphic or inline-graphic into a body block json format" image_content = OrderedDict() if tag: copy_attribute(tag.attrs, 'xlink:href', image_content, 'uri') if "uri" in image_content: # todo!! alt set_if_value(image_content, "alt", "") return image_content
format a graphic or inline-graphic into a body block json format
entailment
def body_block_title_label_caption(tag_content, title_value, label_value, caption_content, set_caption=True, prefer_title=False, prefer_label=False): """set the title, label and caption values in a consistent way set_caption: insert a "caption" field prefer_title: when only one value is available, set title rather than label. If False, set label rather than title""" set_if_value(tag_content, "label", rstrip_punctuation(label_value)) set_if_value(tag_content, "title", title_value) if set_caption is True and caption_content and len(caption_content) > 0: tag_content["caption"] = caption_content if prefer_title: if "title" not in tag_content and label_value: set_if_value(tag_content, "title", label_value) del(tag_content["label"]) if prefer_label: if "label" not in tag_content and title_value: set_if_value(tag_content, "label", rstrip_punctuation(title_value)) del(tag_content["title"])
set the title, label and caption values in a consistent way set_caption: insert a "caption" field prefer_title: when only one value is available, set title rather than label. If False, set label rather than title
entailment
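A sketch of the prefer_title branch above, assuming set_if_value skips empty values, rstrip_punctuation trims trailing punctuation, and the function is importable from the same module (the import path is an assumption):

from collections import OrderedDict
from elifetools.parseJATS import body_block_title_label_caption  # assumed module layout

tag_content = OrderedDict()
body_block_title_label_caption(tag_content, title_value=None, label_value="Figure 1.",
                               caption_content=[], prefer_title=True)
# the label value is promoted to title (unstripped) and the label key is removed
assert tag_content == OrderedDict([("title", "Figure 1.")])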
def body_block_attribution(tag): "extract the attribution content for figures, tables, videos" attributions = [] if raw_parser.attrib(tag): for attrib_tag in raw_parser.attrib(tag): attributions.append(node_contents_str(attrib_tag)) if raw_parser.permissions(tag): # concatenate content from the permissions tag for permissions_tag in raw_parser.permissions(tag): attrib_string = '' # add the copyright statement if found attrib_string = join_sentences(attrib_string, node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.') # add the license paragraphs if raw_parser.licence_p(permissions_tag): for licence_p_tag in raw_parser.licence_p(permissions_tag): attrib_string = join_sentences(attrib_string, node_contents_str(licence_p_tag), '.') if attrib_string != '': attributions.append(attrib_string) return attributions
extract the attribution content for figures, tables, videos
entailment
def body_blocks(soup): """ Note: for some reason this works and few other attempted methods work Search for certain node types, find the first node's siblings of the same type Add the first sibling and the other siblings to a list and return them """ nodenames = body_block_nodenames() body_block_tags = [] if not soup: return body_block_tags first_sibling_node = firstnn(soup.find_all()) if first_sibling_node is None: return body_block_tags sibling_tags = first_sibling_node.find_next_siblings(nodenames) # Add the first component tag and the ResultSet tags together body_block_tags.append(first_sibling_node) for tag in sibling_tags: body_block_tags.append(tag) return body_block_tags
Note: for some reason this works and few other attempted methods work Search for certain node types, find the first node's siblings of the same type Add the first sibling and the other siblings to a list and return them
entailment
def abstract_json(soup): """abstract in article json format""" abstract_tags = raw_parser.abstract(soup) abstract_json = None for tag in abstract_tags: if tag.get("abstract-type") is None: abstract_json = render_abstract_json(tag) return abstract_json
abstract in article json format
entailment
def digest_json(soup): """digest in article json format""" abstract_tags = raw_parser.abstract(soup, abstract_type="executive-summary") abstract_json = None for tag in abstract_tags: abstract_json = render_abstract_json(tag) return abstract_json
digest in article json format
entailment
def author_affiliations(author, html_flag=True): """compile author affiliations for json output""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) affiliations = [] if author.get("affiliations"): for affiliation in author.get("affiliations"): affiliation_json = OrderedDict() affiliation_json["name"] = [] if affiliation.get("dept"): affiliation_json["name"].append(convert(affiliation.get("dept"))) if affiliation.get("institution") and affiliation.get("institution").strip() != '': affiliation_json["name"].append(convert(affiliation.get("institution"))) # Remove if empty if affiliation_json["name"] == []: del affiliation_json["name"] if ((affiliation.get("city") and affiliation.get("city").strip() != '') or affiliation.get("country") and affiliation.get("country").strip() != ''): affiliation_address = OrderedDict() affiliation_address["formatted"] = [] affiliation_address["components"] = OrderedDict() if affiliation.get("city") and affiliation.get("city").strip() != '': affiliation_address["formatted"].append(affiliation.get("city")) affiliation_address["components"]["locality"] = [] affiliation_address["components"]["locality"].append(affiliation.get("city")) if affiliation.get("country") and affiliation.get("country").strip() != '': affiliation_address["formatted"].append(affiliation.get("country")) affiliation_address["components"]["country"] = affiliation.get("country") # Add if not empty if affiliation_address != {}: affiliation_json["address"] = affiliation_address # Add if not empty if affiliation_json != {}: affiliations.append(affiliation_json) if affiliations != []: return affiliations else: return None
compile author affiliations for json output
entailment
def author_json_details(author, author_json, contributions, correspondence, competing_interests, equal_contributions_map, present_address_data, foot_notes_data, html_flag=True): """add more author json""" # Configure the XML to HTML conversion preference for shorthand use below convert = lambda xml_string: xml_to_html(html_flag, xml_string) if author_affiliations(author): author_json["affiliations"] = author_affiliations(author) # foot notes or additionalInformation if author_foot_notes(author, foot_notes_data): author_json["additionalInformation"] = author_foot_notes(author, foot_notes_data) # email if author_email_addresses(author, correspondence): author_json["emailAddresses"] = author_email_addresses(author, correspondence) # phone if author_phone_numbers(author, correspondence): author_json["phoneNumbers"] = author_phone_numbers_json(author, correspondence) # contributions if author_contribution(author, contributions): author_json["contribution"] = convert(author_contribution(author, contributions)) # competing interests if author_competing_interests(author, competing_interests): author_json["competingInterests"] = convert( author_competing_interests(author, competing_interests)) # equal-contributions if author_equal_contribution(author, equal_contributions_map): author_json["equalContributionGroups"] = author_equal_contribution(author, equal_contributions_map) # postalAddress if author_present_address(author, present_address_data): author_json["postalAddresses"] = author_present_address(author, present_address_data) return author_json
add more author json
entailment
def collab_to_group_author_key_map(authors): """compile a map of author collab to group-author-key""" collab_map = {} for author in authors: if author.get("collab"): collab_map[author.get("collab")] = author.get("group-author-key") return collab_map
compile a map of author collab to group-author-key
entailment
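Since collab_to_group_author_key_map is pure dict-building, its behaviour is easy to pin down (the import path is an assumption):

from elifetools.parseJATS import collab_to_group_author_key_map  # assumed module layout

authors = [{"collab": "The eLife Consortium", "group-author-key": "group1"},
           {"surname": "Smith"}]  # no collab key, so it is skipped
assert collab_to_group_author_key_map(authors) == {"The eLife Consortium": "group1"}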
def map_equal_contributions(contributors): """assign numeric values to each unique equal-contrib id""" equal_contribution_map = {} equal_contribution_keys = [] for contributor in contributors: if contributor.get("references") and "equal-contrib" in contributor.get("references"): for key in contributor["references"]["equal-contrib"]: if key not in equal_contribution_keys: equal_contribution_keys.append(key) # Do a basic sort equal_contribution_keys = sorted(equal_contribution_keys) # Assign keys based on sorted values for i, equal_contribution_key in enumerate(equal_contribution_keys): equal_contribution_map[equal_contribution_key] = i+1 return equal_contribution_map
assign numeric values to each unique equal-contrib id
entailment
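map_equal_contributions is also pure: ids are collected, sorted, then numbered from 1 (import path assumed):

from elifetools.parseJATS import map_equal_contributions  # assumed module layout

contributors = [
    {"references": {"equal-contrib": ["equal-contrib-2"]}},
    {"references": {"equal-contrib": ["equal-contrib-1", "equal-contrib-2"]}},
    {"references": {}},  # no equal-contrib ids, contributes nothing
]
assert map_equal_contributions(contributors) == {"equal-contrib-1": 1, "equal-contrib-2": 2}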
def authors_json(soup): """authors list in article json format""" authors_json_data = [] contributors_data = contributors(soup, "full") author_contributions_data = author_contributions(soup, None) author_competing_interests_data = competing_interests(soup, None) author_correspondence_data = full_correspondence(soup) authors_non_byline_data = authors_non_byline(soup) equal_contributions_map = map_equal_contributions(contributors_data) present_address_data = present_addresses(soup) foot_notes_data = other_foot_notes(soup) # First line authors builds basic structure for contributor in contributors_data: author_json = None if contributor["type"] == "author" and contributor.get("collab"): author_json = author_group(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) elif contributor.get("on-behalf-of"): author_json = author_on_behalf_of(contributor) elif contributor["type"] == "author" and not contributor.get("group-author-key"): author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if author_json: authors_json_data.append(author_json) # Second, add byline author data collab_map = collab_to_group_author_key_map(contributors_data) for contributor in [elem for elem in contributors_data if elem.get("group-author-key") and not elem.get("collab")]: for group_author in [elem for elem in authors_json_data if elem.get('type') == 'group']: group_author_key = None if group_author["name"] in collab_map: group_author_key = collab_map[group_author["name"]] if contributor.get("group-author-key") == group_author_key: author_json = author_person(contributor, author_contributions_data, author_correspondence_data, author_competing_interests_data, equal_contributions_map, present_address_data, foot_notes_data) if contributor.get("sub-group"): if "groups" not in group_author: group_author["groups"] = OrderedDict() if contributor.get("sub-group") not in group_author["groups"]: group_author["groups"][contributor.get("sub-group")] = [] group_author["groups"][contributor.get("sub-group")].append(author_json) else: if "people" not in group_author: group_author["people"] = [] group_author["people"].append(author_json) authors_json_data_rewritten = elifetools.json_rewrite.rewrite_json("authors_json", soup, authors_json_data) return authors_json_data_rewritten
authors list in article json format
entailment
def author_line(soup):
    """take preferred names from authors json and format them into an author line"""
    author_line = None
    authors_json_data = authors_json(soup)
    author_names = extract_author_line_names(authors_json_data)
    if len(author_names) > 0:
        author_line = format_author_line(author_names)
    return author_line
take preferred names from authors json and format them into an author line
entailment
def format_author_line(author_names):
    """authorLine format depends on if there is 1, 2 or more than 2 authors"""
    author_line = None
    if not author_names:
        return author_line
    if len(author_names) <= 2:
        author_line = ", ".join(author_names)
    elif len(author_names) > 2:
        author_line = author_names[0] + " et al."
    return author_line
authorLine format depends on if there is 1, 2 or more than 2 authors
entailment
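The three cases in the docstring, sketched with hypothetical names:

    format_author_line(["Anne Author"])
    # -> "Anne Author"
    format_author_line(["Anne Author", "Bob Writer"])
    # -> "Anne Author, Bob Writer"
    format_author_line(["Anne Author", "Bob Writer", "Cai Scribe"])
    # -> "Anne Author et al."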
def references_date(year=None):
    "Handle year value parsing for some edge cases"
    date = None
    discriminator = None
    in_press = None
    if year and "in press" in year.lower().strip():
        in_press = True
    elif year and re.match("^[0-9]+$", year):
        date = year
    elif year:
        discriminator_match = re.match("^([0-9]+?)([a-z]+?)$", year)
        if discriminator_match:
            date = discriminator_match.group(1)
            discriminator = discriminator_match.group(2)
        else:
            date = year
    return (date, discriminator, in_press)
Handle year value parsing for some edge cases
entailment
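The edge cases the docstring alludes to, sketched with sample year strings:

    references_date("2012")      # -> ("2012", None, None)
    references_date("2012a")     # -> ("2012", "a", None)   year with a discriminator suffix
    references_date("in press")  # -> (None, None, True)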
def references_json_authors(ref_authors, ref_content):
    "build the authors for references json here for testability"
    all_authors = references_authors(ref_authors)
    if all_authors != {}:
        if ref_content.get("type") in ["conference-proceeding", "journal", "other",
                                       "periodical", "preprint", "report", "web"]:
            for author_type in ["authors", "authorsEtAl"]:
                set_if_value(ref_content, author_type, all_authors.get(author_type))
        elif ref_content.get("type") in ["book", "book-chapter"]:
            for author_type in ["authors", "authorsEtAl", "editors", "editorsEtAl"]:
                set_if_value(ref_content, author_type, all_authors.get(author_type))
        elif ref_content.get("type") in ["clinical-trial"]:
            # Always set as authors, once, then add the authorsType
            for author_type in ["authors", "collaborators", "sponsors"]:
                if "authorsType" not in ref_content and all_authors.get(author_type):
                    set_if_value(ref_content, "authors", all_authors.get(author_type))
                    set_if_value(ref_content, "authorsEtAl", all_authors.get(author_type + "EtAl"))
                    ref_content["authorsType"] = author_type
        elif ref_content.get("type") in ["data", "software"]:
            for author_type in ["authors", "authorsEtAl", "compilers", "compilersEtAl",
                                "curators", "curatorsEtAl"]:
                set_if_value(ref_content, author_type, all_authors.get(author_type))
        elif ref_content.get("type") in ["patent"]:
            for author_type in ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]:
                set_if_value(ref_content, author_type, all_authors.get(author_type))
        elif ref_content.get("type") in ["thesis"]:
            # Convert list to a non-list
            if all_authors.get("authors") and len(all_authors.get("authors")) > 0:
                ref_content["author"] = all_authors.get("authors")[0]
    return ref_content
build the authors for references json here for testability
entailment
def convert_references_json(ref_content, soup=None):
    "Check for references that will not pass schema validation, fix or convert them to unknown"
    # Convert the reference to unknown if it is still missing important values
    if (
            (ref_content.get("type") == "other")
            or (ref_content.get("type") == "book-chapter" and "editors" not in ref_content)
            or (ref_content.get("type") == "journal" and "articleTitle" not in ref_content)
            or (ref_content.get("type") in ["journal", "book-chapter"] and "pages" not in ref_content)
            or (ref_content.get("type") == "journal" and "journal" not in ref_content)
            or (ref_content.get("type") in ["book", "book-chapter", "report", "thesis", "software"]
                and "publisher" not in ref_content)
            or (ref_content.get("type") == "book" and "bookTitle" not in ref_content)
            or (ref_content.get("type") == "data" and "source" not in ref_content)
            or (ref_content.get("type") == "conference-proceeding" and "conference" not in ref_content)
    ):
        ref_content = references_json_to_unknown(ref_content, soup)
    return ref_content
Check for references that will not pass schema validation, fix or convert them to unknown
entailment
def references_json_unknown_details(ref_content, soup=None):
    "Extract detail value for references of type unknown"
    details = ""
    # Try adding pages values first
    if "pages" in ref_content:
        if "range" in ref_content["pages"]:
            details += ref_content["pages"]["range"]
        else:
            details += ref_content["pages"]
    if soup:
        # Attempt to find the XML element by id, and convert it to details
        if "id" in ref_content:
            ref_tag = first(soup.select("ref#" + ref_content["id"]))
            if ref_tag:
                # Remove tags that would already be part of the unknown reference by now
                for remove_tag in ["person-group", "year", "article-title",
                                   "elocation-id", "fpage", "lpage"]:
                    ref_tag = remove_tag_from_tag(ref_tag, remove_tag)
                # Add the remaining tag content comma separated
                for tag in first(raw_parser.element_citation(ref_tag)):
                    if node_text(tag) is not None:
                        if details != "":
                            details += ", "
                        details += node_text(tag)
    if details == "":
        return None
    else:
        return details
Extract detail value for references of type unknown
entailment
def unwrap_appendix_box(json_content):
    """for use in removing unwanted boxed-content from appendices json"""
    if json_content.get("content") and len(json_content["content"]) > 0:
        first_block = json_content["content"][0]
        if first_block.get("type") == "box" and first_block.get("content"):
            if first_block.get("doi") and not json_content.get("doi"):
                json_content["doi"] = first_block.get("doi")
            json_content["content"] = first_block["content"]
    return json_content
for use in removing unwanted boxed-content from appendices json
entailment
def extract_schemas_from_file(source_path):
    """Extract schemas from 'source_path'.

    :returns: a list of ViewSchema objects on success, None if no schemas
        could be extracted.
    """
    logging.info("Extracting schemas from %s", source_path)
    try:
        with open(source_path, 'r') as source_file:
            source = source_file.read()
    except (FileNotFoundError, PermissionError) as e:
        logging.error("Cannot extract schemas: %s", e.strerror)
    else:
        try:
            schemas = extract_schemas_from_source(source, source_path)
        except SyntaxError as e:
            logging.error("Cannot extract schemas: %s", str(e))
        else:
            logging.info(
                "Extracted %d %s", len(schemas),
                "schema" if len(schemas) == 1 else "schemas")
            return schemas
Extract schemas from 'source_path'. :returns: a list of ViewSchema objects on success, None if no schemas could be extracted.
entailment
def _get_simple_assignments(tree):
    """Get simple assignments from node tree."""
    result = {}
    for node in ast.walk(tree):
        if isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    result[target.id] = node.value
    return result
Get simple assignments from node tree.
entailment
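A short sketch of what the helper collects; the parsed source is hypothetical:

    import ast

    tree = ast.parse("FOO = {'type': 'object'}\nBAR = 42")
    assignments = _get_simple_assignments(tree)
    sorted(assignments)  # -> ['BAR', 'FOO']
    # the values are the right-hand-side AST nodes, not evaluated objects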
def extract_schemas_from_source(source, filename='<unknown>'):
    """Extract schemas from 'source'.

    The 'source' parameter must be a string, and should be valid python
    source. If 'source' is not valid python source, a SyntaxError will be
    raised.

    :returns: a list of ViewSchema objects.
    """
    # Track which acceptable services have been configured.
    acceptable_services = set()
    # Track which acceptable views have been configured:
    acceptable_views = {}
    schemas_found = []
    ast_tree = ast.parse(source, filename)
    simple_names = _get_simple_assignments(ast_tree)

    assigns = [n for n in ast_tree.body if isinstance(n, ast.Assign)]
    call_assigns = [n for n in assigns if isinstance(n.value, ast.Call)]

    # We need to extract the AcceptableService-related views. We parse the
    # assignations twice: The first time to extract the AcceptableService
    # instances, the second to extract the views created on those services.
    for assign in call_assigns:
        if isinstance(assign.value.func, ast.Attribute):
            continue
        if assign.value.func.id == 'AcceptableService':
            for target in assign.targets:
                acceptable_services.add(target.id)

    for assign in call_assigns:
        # only consider calls which are attribute accesses, AND
        # calls where the object being accessed is in acceptable_services, AND
        # calls where the attribute being accessed is the 'api' method.
        if isinstance(assign.value.func, ast.Attribute) and \
                assign.value.func.value.id in acceptable_services and \
                assign.value.func.attr == 'api':
            # this is a view. We need to extract the url and methods specified.
            # they may be specified positionally or via a keyword.
            url = None
            name = None
            # methods has a default value:
            methods = ['GET']

            # This is a view - the URL is the first positional argument:
            args = assign.value.args
            if len(args) >= 1:
                url = ast.literal_eval(args[0])
            if len(args) >= 2:
                name = ast.literal_eval(args[1])

            kwargs = assign.value.keywords
            for kwarg in kwargs:
                if kwarg.arg == 'url':
                    url = ast.literal_eval(kwarg.value)
                if kwarg.arg == 'methods':
                    methods = ast.literal_eval(kwarg.value)
                if kwarg.arg == 'view_name':
                    name = ast.literal_eval(kwarg.value)

            if url and name:
                for target in assign.targets:
                    acceptable_views[target.id] = {
                        'url': url,
                        'name': name,
                        'methods': methods,
                    }

    # iterate over all functions, attempting to find the views.
    functions = [n for n in ast_tree.body if isinstance(n, ast.FunctionDef)]
    for function in functions:
        input_schema = None
        output_schema = None
        doc = ast.get_docstring(function)
        api_options_list = []
        for decorator in function.decorator_list:
            if not isinstance(decorator, ast.Call):
                continue
            if isinstance(decorator.func, ast.Attribute):
                decorator_name = decorator.func.value.id
                # extract version this view was introduced at, which can be
                # specified as an arg or a kwarg:
                version = None
                for kwarg in decorator.keywords:
                    if kwarg.arg == 'introduced_at':
                        version = ast.literal_eval(kwarg.value)
                        break
                if len(decorator.args) == 1:
                    version = ast.literal_eval(decorator.args[0])
                if decorator_name in acceptable_views:
                    api_options = acceptable_views[decorator_name]
                    api_options['version'] = version
                    api_options_list.append(api_options)
            else:
                decorator_name = decorator.func.id
                if decorator_name == 'validate_body':
                    _SimpleNamesResolver(simple_names).visit(decorator.args[0])
                    input_schema = ast.literal_eval(decorator.args[0])
                if decorator_name == 'validate_output':
                    _SimpleNamesResolver(simple_names).visit(decorator.args[0])
                    output_schema = ast.literal_eval(decorator.args[0])
        for api_options in api_options_list:
            schema = ViewSchema(
                view_name=api_options['name'],
                version=api_options['version'],
                input_schema=input_schema,
                output_schema=output_schema,
                methods=api_options['methods'],
                url=api_options['url'],
                doc=doc,
            )
            schemas_found.append(schema)
    return schemas_found
Extract schemas from 'source'. The 'source' parameter must be a string, and should be valid python source. If 'source' is not valid python source, a SyntaxError will be raised. :returns: a list of ViewSchema objects.
entailment
def render_value(value):
    """Render a value, ensuring that any nested dicts are sorted by key."""
    if isinstance(value, list):
        return '[' + ', '.join(render_value(v) for v in value) + ']'
    elif isinstance(value, dict):
        return (
            '{' +
            ', '.join('{k!r}: {v}'.format(k=k, v=render_value(v))
                      for k, v in sorted(value.items())) +
            '}')
    else:
        return repr(value)
Render a value, ensuring that any nested dicts are sorted by key.
entailment
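A quick sketch of the key-sorting behaviour on a nested value (hypothetical data):

    render_value({'b': 2, 'a': [1, {'d': 4, 'c': 3}]})
    # -> "{'a': [1, {'c': 3, 'd': 4}], 'b': 2}"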
def write_service_double_file(target_root, service_name, rendered):
    """Render syntactically valid python service double code."""
    target_path = os.path.join(
        target_root, 'snapstore_schemas', 'service_doubles',
        '%s.py' % service_name
    )
    with open(target_path, 'w') as target_file:
        target_file.write(rendered)
Render syntactically valid python service double code.
entailment
def clean_docstring(docstring):
    """Dedent docstring, special casing the first line."""
    docstring = docstring.strip()
    if '\n' in docstring:
        # multiline docstring
        if docstring[0].isspace():
            # whole docstring is indented
            return textwrap.dedent(docstring)
        else:
            # first line not indented, rest maybe
            first, _, rest = docstring.partition('\n')
            return first + '\n' + textwrap.dedent(rest)
    return docstring
Dedent docstring, special casing the first line.
entailment
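A small sketch of the special-cased first line; the docstring text is hypothetical:

    clean_docstring("Summary line.\n    Indented details.")
    # -> "Summary line.\nIndented details."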
def _sort_schema(schema):
    """Recursively sorts a JSON schema by dict key."""
    if isinstance(schema, dict):
        for k, v in sorted(schema.items()):
            if isinstance(v, dict):
                yield k, OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield k, list(_sort_schema(v))
            else:
                yield k, v
    elif isinstance(schema, list):
        for v in schema:
            if isinstance(v, dict):
                yield OrderedDict(_sort_schema(v))
            elif isinstance(v, list):
                yield list(_sort_schema(v))
            else:
                yield v
    else:
        yield schema
Recursively sorts a JSON schema by dict key.
entailment
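A sketch of the recursive sort (note the generator is wrapped in OrderedDict by the caller); the schema here is hypothetical:

    from collections import OrderedDict

    OrderedDict(_sort_schema({'b': 1, 'a': {'d': 2, 'c': 3}}))
    # -> OrderedDict([('a', OrderedDict([('c', 3), ('d', 2)])), ('b', 1)])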
def urlmap(patterns):
    """Recursively build a map of (group, name) => url patterns.

    Group is either the resolver namespace or app name for the url config.
    The urls are joined with any prefixes, and cleaned up of extraneous
    regex specific syntax."""
    for pattern in patterns:
        group = getattr(pattern, 'namespace', None)
        if group is None:
            group = getattr(pattern, 'app_name', None)
        path = '/' + get_pattern(pattern).lstrip('^').rstrip('$')
        if isinstance(pattern, PATTERNS):
            yield (group, pattern.name), path
        elif isinstance(pattern, RESOLVERS):
            subpatterns = pattern.url_patterns
            for (_, name), subpath in urlmap(subpatterns):
                yield (group, name), path.rstrip('/') + subpath
Recursively build a map of (group, name) => url patterns. Group is either the resolver namespace or app name for the url config. The urls are joined with any prefixes, and cleaned up of extraneous regex specific syntax.
entailment
def get_field_schema(name, field):
    """Returns a JSON Schema representation of a form field."""
    field_schema = {
        'type': 'string',
    }

    if field.label:
        field_schema['title'] = str(field.label)  # force translation
    if field.help_text:
        field_schema['description'] = str(field.help_text)  # force translation

    if isinstance(field, (fields.URLField, fields.FileField)):
        field_schema['format'] = 'uri'
    elif isinstance(field, fields.EmailField):
        field_schema['format'] = 'email'
    elif isinstance(field, fields.DateTimeField):
        field_schema['format'] = 'date-time'
    elif isinstance(field, fields.DateField):
        field_schema['format'] = 'date'
    elif isinstance(field, (fields.DecimalField, fields.FloatField)):
        field_schema['type'] = 'number'
    elif isinstance(field, fields.IntegerField):
        field_schema['type'] = 'integer'
    elif isinstance(field, fields.NullBooleanField):
        field_schema['type'] = 'boolean'
    elif isinstance(field.widget, widgets.CheckboxInput):
        field_schema['type'] = 'boolean'

    if getattr(field, 'choices', []):
        field_schema['enum'] = sorted([choice[0] for choice in field.choices])

    # check for multiple values
    if isinstance(field.widget, (widgets.Select, widgets.ChoiceWidget)):
        if field.widget.allow_multiple_selected:
            # promote to array of <type>, move details into the items field
            field_schema['items'] = {
                'type': field_schema['type'],
            }
            if 'enum' in field_schema:
                field_schema['items']['enum'] = field_schema.pop('enum')
            field_schema['type'] = 'array'
    return field_schema
Returns a JSON Schema representation of a form field.
entailment
def get_form_schema(form):
    """Return a JSON Schema object for a Django Form."""
    schema = {
        'type': 'object',
        'properties': {},
    }
    for name, field in form.base_fields.items():
        schema['properties'][name] = get_field_schema(name, field)
        if field.required:
            schema.setdefault('required', []).append(name)
    return schema
Return a JSON Schema object for a Django Form.
entailment
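A hedged sketch of the output for a hypothetical Django form; the form and the exact output shape are illustrative only:

    from django import forms

    class SignupForm(forms.Form):
        email = forms.EmailField()
        age = forms.IntegerField(required=False)

    get_form_schema(SignupForm)
    # -> roughly:
    # {'type': 'object',
    #  'properties': {'email': {'type': 'string', 'format': 'email'},
    #                 'age': {'type': 'integer'}},
    #  'required': ['email']}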
def handler(self, handler_class):
    """Link to an API handler class (e.g. piston or DRF)."""
    self.handler_class = handler_class
    # we take the docstring from the handler class, not the methods
    if self.docs is None and handler_class.__doc__:
        self.docs = clean_docstring(handler_class.__doc__)
    return handler_class
Link to an API handler class (e.g. piston or DRF).
entailment
def xml_to_html(html_flag, xml_string, base_url=None):
    "For formatting json output into HTML friendly format"
    if not xml_string or html_flag is not True:
        return xml_string
    html_string = xml_string
    html_string = remove_comment_tags(html_string)
    # Escape unmatched angle brackets
    if '<' in html_string or '>' in html_string:
        html_string = escape_html(html_string)
    # Replace more tags
    html_string = replace_xref_tags(html_string)
    html_string = replace_ext_link_tags(html_string)
    html_string = replace_email_tags(html_string)
    html_string = replace_inline_graphic_tags(html_string, base_url)
    html_string = replace_named_content_tags(html_string)
    html_string = replace_mathml_tags(html_string)
    html_string = replace_table_style_author_callout(html_string)
    html_string = replace_simple_tags(html_string, 'italic', 'i')
    html_string = replace_simple_tags(html_string, 'bold', 'b')
    html_string = replace_simple_tags(html_string, 'underline', 'span', '<span class="underline">')
    html_string = replace_simple_tags(html_string, 'sc', 'span', '<span class="small-caps">')
    html_string = replace_simple_tags(html_string, 'monospace', 'span', '<span class="monospace">')
    html_string = replace_simple_tags(html_string, 'inline-formula', None)
    html_string = replace_simple_tags(html_string, 'break', 'br')
    return html_string
For formatting json output into HTML friendly format
entailment
def replace_simple_tags(s, from_tag='italic', to_tag='i', to_open_tag=None):
    """
    Replace tags such as <italic> to <i>
    This does not validate markup
    """
    if to_open_tag:
        s = s.replace('<' + from_tag + '>', to_open_tag)
    elif to_tag:
        s = s.replace('<' + from_tag + '>', '<' + to_tag + '>')
        s = s.replace('<' + from_tag + '/>', '<' + to_tag + '/>')
    else:
        s = s.replace('<' + from_tag + '>', '')
        s = s.replace('<' + from_tag + '/>', '')

    if to_tag:
        s = s.replace('</' + from_tag + '>', '</' + to_tag + '>')
    else:
        s = s.replace('</' + from_tag + '>', '')
    return s
Replace tags such as <italic> to <i> This does not validate markup
entailment
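A small sketch of the two replacement modes (default tag swap, and a custom open tag); input strings are hypothetical:

    replace_simple_tags('<italic>word</italic>')
    # -> '<i>word</i>'
    replace_simple_tags('<underline>word</underline>', 'underline', 'span',
                        '<span class="underline">')
    # -> '<span class="underline">word</span>'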
def validate_body(schema):
    """Validate the body of incoming requests for a flask view.

    An example usage might look like this::

        from snapstore_schemas import validate_body

        @validate_body({
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'snap_id': {'type': 'string'},
                    'series': {'type': 'string'},
                    'name': {'type': 'string'},
                    'title': {'type': 'string'},
                    'keywords': {
                        'type': 'array',
                        'items': {'type': 'string'}
                    },
                    'summary': {'type': 'string'},
                    'description': {'type': 'string'},
                    'created_at': {'type': 'string'},
                },
                'required': ['snap_id', 'series'],
                'additionalProperties': False
            }
        })
        def my_flask_view():
            # view code here
            return "Hello World", 200

    All incoming requests that have been routed to this view will be matched
    against the specified schema. If the request body does not match the
    schema an instance of `DataValidationError` will be raised.

    By default this will cause the flask application to return a 500
    response, but this can be customised by telling flask how to handle
    these exceptions. The exception instance has an 'error_list' attribute
    that contains a list of all the errors encountered while processing the
    request body.
    """
    location = get_callsite_location()

    def decorator(fn):
        validate_schema(schema)
        wrapper = wrap_request(fn, schema)
        record_schemas(
            fn, wrapper, location, request_schema=sort_schema(schema))
        return wrapper
    return decorator
Validate the body of incoming requests for a flask view.

An example usage might look like this::

    from snapstore_schemas import validate_body

    @validate_body({
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                'snap_id': {'type': 'string'},
                'series': {'type': 'string'},
                'name': {'type': 'string'},
                'title': {'type': 'string'},
                'keywords': {
                    'type': 'array',
                    'items': {'type': 'string'}
                },
                'summary': {'type': 'string'},
                'description': {'type': 'string'},
                'created_at': {'type': 'string'},
            },
            'required': ['snap_id', 'series'],
            'additionalProperties': False
        }
    })
    def my_flask_view():
        # view code here
        return "Hello World", 200

All incoming requests that have been routed to this view will be matched against the specified schema. If the request body does not match the schema an instance of `DataValidationError` will be raised. By default this will cause the flask application to return a 500 response, but this can be customised by telling flask how to handle these exceptions. The exception instance has an 'error_list' attribute that contains a list of all the errors encountered while processing the request body.
entailment
def record_schemas(
        fn, wrapper, location, request_schema=None, response_schema=None):
    """Support extracting the schema from the decorated function."""
    # have we already been decorated by an acceptable api call?
    has_acceptable = hasattr(fn, '_acceptable_metadata')

    if request_schema is not None:
        # preserve schema for later use
        wrapper._request_schema = request_schema
        wrapper._request_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._request_schema = request_schema
            fn._acceptable_metadata._request_schema_location = location

    if response_schema is not None:
        # preserve schema for later use
        wrapper._response_schema = response_schema
        wrapper._response_schema_location = location
        if has_acceptable:
            fn._acceptable_metadata._response_schema = response_schema
            fn._acceptable_metadata._response_schema_location = location
Support extracting the schema from the decorated function.
entailment
def validate_output(schema):
    """Validate the body of a response from a flask view.

    Like `validate_body`, this function compares a json document to a
    jsonschema specification. However, this function applies the schema to
    the view response.

    Instead of the view returning a flask response object, it should instead
    return a Python list or dictionary. For example::

        from snapstore_schemas import validate_output

        @validate_output({
            'type': 'object',
            'properties': {
                'ok': {'type': 'boolean'},
            },
            'required': ['ok'],
            'additionalProperties': False
        })
        def my_flask_view():
            # view code here
            return {'ok': True}

    Every view response will be evaluated against the schema. Any that do
    not comply with the schema will cause DataValidationError to be raised.
    """
    location = get_callsite_location()

    def decorator(fn):
        validate_schema(schema)
        wrapper = wrap_response(fn, schema)
        record_schemas(
            fn, wrapper, location, response_schema=sort_schema(schema))
        return wrapper
    return decorator
Validate the body of a response from a flask view.

Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python list or dictionary. For example::

    from snapstore_schemas import validate_output

    @validate_output({
        'type': 'object',
        'properties': {
            'ok': {'type': 'boolean'},
        },
        'required': ['ok'],
        'additionalProperties': False
    })
    def my_flask_view():
        # view code here
        return {'ok': True}

Every view response will be evaluated against the schema. Any that do not comply with the schema will cause DataValidationError to be raised.
entailment
def validate(payload, schema):
    """Validate `payload` against `schema`, returning an error list.

    jsonschema provides lots of information in its errors, but it can be a
    bit of work to extract all the information.
    """
    v = jsonschema.Draft4Validator(
        schema, format_checker=jsonschema.FormatChecker())
    error_list = []
    for error in v.iter_errors(payload):
        message = error.message
        location = '/' + '/'.join([str(c) for c in error.absolute_path])
        error_list.append(message + ' at ' + location)
    return error_list
Validate `payload` against `schema`, returning an error list. jsonschema provides lots of information in its errors, but it can be a bit of work to extract all the information.
entailment
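A small usage sketch; the schema and payload are hypothetical:

    schema = {'type': 'object',
              'properties': {'ok': {'type': 'boolean'}},
              'required': ['ok']}
    validate({}, schema)
    # -> ["'ok' is a required property at /"]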
def connect(url, max_retries=None, **kwargs):
    """Connects to a Phoenix query server.

    :param url: URL to the Phoenix query server, e.g. ``http://localhost:8765/``
    :param autocommit: Switch the connection to autocommit mode.
    :param readonly: Switch the connection to readonly mode.
    :param max_retries: The maximum number of retries in case there is a connection error.
    :param cursor_factory: If specified, the connection's
        :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.
    :returns: :class:`~phoenixdb.connection.Connection` object.
    """
    client = AvaticaClient(url, max_retries=max_retries)
    client.connect()
    return Connection(client, **kwargs)
Connects to a Phoenix query server. :param url: URL to the Phoenix query server, e.g. ``http://localhost:8765/`` :param autocommit: Switch the connection to autocommit mode. :param readonly: Switch the connection to readonly mode. :param max_retries: The maximum number of retries in case there is a connection error. :param cursor_factory: If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it. :returns: :class:`~phoenixdb.connection.Connection` object.
entailment
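A hedged usage sketch for connect; the server URL and table name are placeholders, assuming a Phoenix query server is reachable locally:

    import phoenixdb

    conn = phoenixdb.connect('http://localhost:8765/', autocommit=True)
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM users")  # hypothetical table
    print(cursor.fetchall())
    conn.close()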
def connect(self):
    """Opens a HTTP connection to the RPC server."""
    logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
    try:
        self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
        self.connection.connect()
    except (httplib.HTTPException, socket.error) as e:
        raise errors.InterfaceError('Unable to connect to the specified service', e)
Opens a HTTP connection to the RPC server.
entailment
def close(self):
    """Closes the HTTP connection to the RPC server."""
    if self.connection is not None:
        logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
        try:
            self.connection.close()
        except httplib.HTTPException:
            logger.warning("Error while closing connection", exc_info=True)
        self.connection = None
Closes the HTTP connection to the RPC server.
entailment
def connection_sync(self, connection_id, connProps=None):
    """Synchronizes connection properties with the server.

    :param connection_id: ID of the current connection.
    :param connProps: Dictionary with the properties that should be changed.
    :returns: A ``common_pb2.ConnectionProperties`` object.
    """
    if connProps is None:
        connProps = {}

    request = requests_pb2.ConnectionSyncRequest()
    request.connection_id = connection_id
    request.conn_props.auto_commit = connProps.get('autoCommit', False)
    request.conn_props.has_auto_commit = True
    request.conn_props.read_only = connProps.get('readOnly', False)
    request.conn_props.has_read_only = True
    request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
    request.conn_props.catalog = connProps.get('catalog', '')
    request.conn_props.schema = connProps.get('schema', '')

    response_data = self._apply(request)
    response = responses_pb2.ConnectionSyncResponse()
    response.ParseFromString(response_data)
    return response.conn_props
Synchronizes connection properties with the server. :param connection_id: ID of the current connection. :param connProps: Dictionary with the properties that should be changed. :returns: A ``common_pb2.ConnectionProperties`` object.
entailment
def open_connection(self, connection_id, info=None):
    """Opens a new connection.

    :param connection_id: ID of the connection to open.
    """
    request = requests_pb2.OpenConnectionRequest()
    request.connection_id = connection_id
    if info is not None:
        # Info is a list of repeated pairs, setting a dict directly fails
        for k, v in info.items():
            request.info[k] = v

    response_data = self._apply(request)
    response = responses_pb2.OpenConnectionResponse()
    response.ParseFromString(response_data)
Opens a new connection. :param connection_id: ID of the connection to open.
entailment
def close_connection(self, connection_id):
    """Closes a connection.

    :param connection_id: ID of the connection to close.
    """
    request = requests_pb2.CloseConnectionRequest()
    request.connection_id = connection_id
    self._apply(request)
Closes a connection. :param connection_id: ID of the connection to close.
entailment
def create_statement(self, connection_id):
    """Creates a new statement.

    :param connection_id: ID of the current connection.
    :returns: New statement ID.
    """
    request = requests_pb2.CreateStatementRequest()
    request.connection_id = connection_id

    response_data = self._apply(request)
    response = responses_pb2.CreateStatementResponse()
    response.ParseFromString(response_data)
    return response.statement_id
Creates a new statement. :param connection_id: ID of the current connection. :returns: New statement ID.
entailment
def close_statement(self, connection_id, statement_id):
    """Closes a statement.

    :param connection_id: ID of the current connection.
    :param statement_id: ID of the statement to close.
    """
    request = requests_pb2.CloseStatementRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    self._apply(request)
Closes a statement. :param connection_id: ID of the current connection. :param statement_id: ID of the statement to close.
entailment
def prepare_and_execute(self, connection_id, statement_id, sql,
                        max_rows_total=None, first_frame_max_size=None):
    """Prepares and immediately executes a statement.

    :param connection_id: ID of the current connection.
    :param statement_id: ID of the statement to prepare.
    :param sql: SQL query.
    :param max_rows_total: The maximum number of rows that will be allowed for this query.
    :param first_frame_max_size: The maximum number of rows that will be
        returned in the first Frame returned for this query.
    :returns: Result set with the signature of the prepared statement and the first frame data.
    """
    request = requests_pb2.PrepareAndExecuteRequest()
    request.connection_id = connection_id
    request.statement_id = statement_id
    request.sql = sql
    if max_rows_total is not None:
        request.max_rows_total = max_rows_total
    if first_frame_max_size is not None:
        request.first_frame_max_size = first_frame_max_size

    response_data = self._apply(request, 'ExecuteResponse')
    response = responses_pb2.ExecuteResponse()
    response.ParseFromString(response_data)
    return response.results
Prepares and immediately executes a statement. :param connection_id: ID of the current connection. :param statement_id: ID of the statement to prepare. :param sql: SQL query. :param max_rows_total: The maximum number of rows that will be allowed for this query. :param first_frame_max_size: The maximum number of rows that will be returned in the first Frame returned for this query. :returns: Result set with the signature of the prepared statement and the first frame data.
entailment